Merge branch 'master' of git://git.kernel.org/pub/scm/linux/kernel/git/klassert/ipsec...
authorDavid S. Miller <davem@davemloft.net>
Thu, 22 May 2014 20:00:00 +0000 (16:00 -0400)
committerDavid S. Miller <davem@davemloft.net>
Thu, 22 May 2014 20:00:00 +0000 (16:00 -0400)
Steffen Klassert says:

====================
pull request (net-next): ipsec-next 2014-05-22

This is the last ipsec pull request before I leave for
a three-week vacation tomorrow. David, can you please
take urgent ipsec patches directly into net/net-next
during this time?

I'll continue to run the ipsec/ipsec-next trees as soon
as I'm back.

1) Simplify the xfrm audit handling, from Tetsuo Handa.

2) Codingstyle cleanup for xfrm_output, from Fabian Frederick.

Please pull or let me know if there are problems.
====================

Signed-off-by: David S. Miller <davem@davemloft.net>
1364 files changed:
Documentation/DocBook/80211.tmpl
Documentation/devicetree/bindings/arm/arch_timer.txt
Documentation/devicetree/bindings/ata/apm-xgene.txt
Documentation/devicetree/bindings/net/arc_emac.txt
Documentation/devicetree/bindings/net/broadcom-bcmgenet.txt
Documentation/devicetree/bindings/net/broadcom-systemport.txt [new file with mode: 0644]
Documentation/devicetree/bindings/net/cpsw-phy-sel.txt
Documentation/devicetree/bindings/net/fixed-link.txt [new file with mode: 0644]
Documentation/devicetree/bindings/net/fsl-tsec-phy.txt
Documentation/devicetree/bindings/net/ieee802154/at86rf230.txt [new file with mode: 0644]
Documentation/devicetree/bindings/net/mdio-gpio.txt
Documentation/devicetree/bindings/net/micrel-ksz9021.txt [deleted file]
Documentation/devicetree/bindings/net/micrel-ksz90x1.txt [new file with mode: 0644]
Documentation/devicetree/bindings/net/socfpga-dwmac.txt
Documentation/devicetree/bindings/net/stmmac.txt
Documentation/devicetree/bindings/net/via-rhine.txt [new file with mode: 0644]
Documentation/devicetree/bindings/pinctrl/pinctrl-st.txt
Documentation/devicetree/bindings/sound/davinci-mcasp-audio.txt
Documentation/devicetree/bindings/sound/tlv320aic31xx.txt
Documentation/driver-model/devres.txt
Documentation/networking/bonding.txt
Documentation/networking/can.txt
Documentation/networking/cdc_mbim.txt [new file with mode: 0644]
Documentation/networking/filter.txt
Documentation/networking/scaling.txt
MAINTAINERS
Makefile
arch/arc/kernel/entry.S
arch/arm/Kconfig
arch/arm/Kconfig.debug
arch/arm/boot/dts/Makefile
arch/arm/boot/dts/am335x-bone-common.dtsi
arch/arm/boot/dts/am335x-evm.dts
arch/arm/boot/dts/am335x-evmsk.dts
arch/arm/boot/dts/am335x-igep0033.dtsi
arch/arm/boot/dts/am33xx.dtsi
arch/arm/boot/dts/am4372.dtsi
arch/arm/boot/dts/armada-370-xp.dtsi
arch/arm/boot/dts/armada-38x.dtsi
arch/arm/boot/dts/armada-xp-matrix.dts
arch/arm/boot/dts/dra7.dtsi
arch/arm/boot/dts/dra7xx-clocks.dtsi
arch/arm/boot/dts/imx25.dtsi
arch/arm/boot/dts/imx27-apf27.dts
arch/arm/boot/dts/imx27.dtsi
arch/arm/boot/dts/imx50.dtsi
arch/arm/boot/dts/imx51.dtsi
arch/arm/boot/dts/imx53-m53evk.dts
arch/arm/boot/dts/imx53-qsb-common.dtsi
arch/arm/boot/dts/imx53-tx53-x03x.dts
arch/arm/boot/dts/imx53.dtsi
arch/arm/boot/dts/imx6q-dmo-edmqmx6.dts
arch/arm/boot/dts/imx6q-gw5400-a.dts
arch/arm/boot/dts/imx6qdl-gw52xx.dtsi
arch/arm/boot/dts/imx6qdl-microsom-ar8035.dtsi
arch/arm/boot/dts/imx6qdl.dtsi
arch/arm/boot/dts/imx6sl-evk.dts
arch/arm/boot/dts/imx6sl.dtsi
arch/arm/boot/dts/kirkwood-b3.dts
arch/arm/boot/dts/kirkwood-cloudbox.dts
arch/arm/boot/dts/kirkwood-dreamplug.dts
arch/arm/boot/dts/kirkwood-laplug.dts
arch/arm/boot/dts/kirkwood-mv88f6281gtw-ge.dts
arch/arm/boot/dts/kirkwood-ns2-common.dtsi
arch/arm/boot/dts/kirkwood-nsa310.dts
arch/arm/boot/dts/kirkwood-nsa310a.dts
arch/arm/boot/dts/kirkwood-openblocks_a6.dts
arch/arm/boot/dts/kirkwood-openblocks_a7.dts
arch/arm/boot/dts/omap3-beagle-xm-ab.dts [new file with mode: 0644]
arch/arm/boot/dts/omap3-devkit8000.dts
arch/arm/boot/dts/omap3-lilly-a83x.dtsi
arch/arm/boot/dts/omap3.dtsi
arch/arm/boot/dts/omap4.dtsi
arch/arm/boot/dts/omap5.dtsi
arch/arm/boot/dts/r8a7740.dtsi
arch/arm/boot/dts/r8a7790-lager.dts
arch/arm/boot/dts/r8a7791-koelsch.dts
arch/arm/boot/dts/rk3188.dtsi
arch/arm/boot/dts/sh73a0.dtsi
arch/arm/boot/dts/stih415-pinctrl.dtsi
arch/arm/boot/dts/stih416-pinctrl.dtsi
arch/arm/boot/dts/tegra124.dtsi
arch/arm/boot/dts/vf610-twr.dts
arch/arm/boot/dts/vf610.dtsi
arch/arm/boot/dts/vt8500.dtsi
arch/arm/boot/dts/wm8650.dtsi
arch/arm/boot/dts/wm8850.dtsi
arch/arm/boot/dts/zynq-7000.dtsi
arch/arm/boot/dts/zynq-zc702.dts
arch/arm/boot/dts/zynq-zc706.dts
arch/arm/common/bL_switcher.c
arch/arm/common/mcpm_entry.c
arch/arm/configs/omap2plus_defconfig
arch/arm/configs/u300_defconfig
arch/arm/configs/u8500_defconfig
arch/arm/include/asm/cputype.h
arch/arm/include/asm/div64.h
arch/arm/include/asm/mcpm.h
arch/arm/include/asm/tlb.h
arch/arm/include/uapi/asm/unistd.h
arch/arm/kernel/Makefile
arch/arm/kernel/calls.S
arch/arm/kernel/head.S
arch/arm/kernel/iwmmxt.S
arch/arm/kernel/machine_kexec.c
arch/arm/kernel/pj4-cp0.c
arch/arm/kernel/sys_oabi-compat.c
arch/arm/kvm/Kconfig
arch/arm/kvm/mmu.c
arch/arm/mach-at91/at91sam9260_devices.c
arch/arm/mach-at91/at91sam9g45_devices.c
arch/arm/mach-imx/clk-imx6q.c
arch/arm/mach-omap2/board-rx51-video.c
arch/arm/mach-omap2/clkt_dpll.c
arch/arm/mach-omap2/gpmc.c
arch/arm/mach-omap2/omap_hwmod.c
arch/arm/mach-omap2/omap_hwmod_3xxx_data.c
arch/arm/mach-omap2/pm34xx.c
arch/arm/mach-pxa/include/mach/hx4700.h
arch/arm/mach-rockchip/platsmp.c
arch/arm/mach-shmobile/board-armadillo800eva.c
arch/arm/mach-shmobile/board-lager.c
arch/arm/mach-shmobile/clock-r8a7778.c
arch/arm/mach-spear/time.c
arch/arm/mach-tegra/Kconfig
arch/arm/mach-vexpress/dcscb.c
arch/arm/mach-vexpress/spc.c
arch/arm/mm/Kconfig
arch/arm/mm/dma-mapping.c
arch/arm/vfp/vfpdouble.c
arch/arm/vfp/vfpsingle.c
arch/arm64/Kconfig
arch/arm64/boot/dts/apm-storm.dtsi
arch/arm64/include/asm/mmu.h
arch/arm64/include/asm/tlb.h
arch/arm64/include/asm/unistd32.h
arch/arm64/kernel/debug-monitors.c
arch/arm64/kernel/early_printk.c
arch/arm64/kernel/setup.c
arch/arm64/kernel/time.c
arch/arm64/mm/dma-mapping.c
arch/arm64/mm/mmu.c
arch/hexagon/include/asm/barrier.h [deleted file]
arch/ia64/include/asm/tlb.h
arch/mips/cavium-octeon/octeon-irq.c
arch/parisc/include/uapi/asm/Kbuild
arch/parisc/include/uapi/asm/resource.h [deleted file]
arch/powerpc/boot/main.c
arch/powerpc/boot/ops.h
arch/powerpc/boot/ps3.c
arch/powerpc/include/asm/opal.h
arch/powerpc/include/uapi/asm/setup.h
arch/powerpc/kernel/ppc_ksyms.c
arch/powerpc/kernel/rtas_flash.c
arch/powerpc/kvm/book3s_hv_rmhandlers.S
arch/powerpc/mm/hash_native_64.c
arch/powerpc/perf/hv-24x7.c
arch/powerpc/perf/hv-gpci.c
arch/powerpc/platforms/powernv/opal-dump.c
arch/powerpc/platforms/powernv/opal-elog.c
arch/powerpc/platforms/powernv/opal-flash.c
arch/powerpc/platforms/powernv/opal-sysparam.c
arch/powerpc/platforms/powernv/opal.c
arch/powerpc/platforms/powernv/pci-ioda.c
arch/powerpc/platforms/powernv/setup.c
arch/powerpc/platforms/powernv/smp.c
arch/powerpc/platforms/pseries/hotplug-cpu.c
arch/powerpc/platforms/pseries/hotplug-memory.c
arch/powerpc/sysdev/fsl_soc.c
arch/powerpc/sysdev/ppc4xx_pci.c
arch/s390/include/asm/ccwgroup.h
arch/s390/include/asm/tlb.h
arch/s390/net/bpf_jit_comp.c
arch/sh/include/asm/tlb.h
arch/sparc/include/asm/checksum_32.h
arch/sparc/include/asm/checksum_64.h
arch/um/include/asm/tlb.h
arch/um/include/shared/os.h
arch/um/kernel/physmem.c
arch/um/os-Linux/file.c
arch/um/os-Linux/main.c
arch/um/os-Linux/mem.c
arch/x86/Makefile
arch/x86/include/asm/checksum_64.h
arch/x86/kernel/apic/io_apic.c
arch/x86/kernel/cpu/perf_event_intel_rapl.c
arch/x86/kernel/vsmp_64.c
arch/x86/kvm/vmx.c
arch/x86/net/bpf_jit.S
arch/x86/net/bpf_jit_comp.c
arch/x86/vdso/vdso-layout.lds.S
arch/xtensa/Kconfig
arch/xtensa/boot/dts/kc705.dts [new file with mode: 0644]
arch/xtensa/boot/dts/xtfpga-flash-128m.dtsi [new file with mode: 0644]
arch/xtensa/boot/dts/xtfpga-flash-16m.dtsi
arch/xtensa/boot/dts/xtfpga-flash-4m.dtsi
arch/xtensa/boot/dts/xtfpga.dtsi
arch/xtensa/include/asm/bootparam.h
arch/xtensa/include/asm/fixmap.h [new file with mode: 0644]
arch/xtensa/include/asm/highmem.h
arch/xtensa/include/asm/pgtable.h
arch/xtensa/include/asm/sysmem.h [new file with mode: 0644]
arch/xtensa/include/asm/tlbflush.h
arch/xtensa/kernel/setup.c
arch/xtensa/kernel/smp.c
arch/xtensa/kernel/xtensa_ksyms.c
arch/xtensa/mm/Makefile
arch/xtensa/mm/cache.c
arch/xtensa/mm/highmem.c [new file with mode: 0644]
arch/xtensa/mm/init.c
arch/xtensa/mm/mmu.c
arch/xtensa/mm/tlb.c
arch/xtensa/platforms/iss/Makefile
arch/xtensa/platforms/xt2000/setup.c
crypto/crypto_user.c
drivers/acpi/acpi_processor.c
drivers/acpi/acpica/exfield.c
drivers/acpi/bus.c
drivers/acpi/ec.c
drivers/ata/Kconfig
drivers/ata/ahci.c
drivers/ata/ahci.h
drivers/ata/libata-core.c
drivers/ata/pata_arasan_cf.c
drivers/ata/pata_at91.c
drivers/ata/pata_samsung_cf.c
drivers/atm/idt77252.c
drivers/base/dd.c
drivers/base/platform.c
drivers/block/floppy.c
drivers/bluetooth/ath3k.c
drivers/bluetooth/btmrvl_drv.h
drivers/bluetooth/btmrvl_main.c
drivers/bluetooth/btmrvl_sdio.c
drivers/bluetooth/btmrvl_sdio.h
drivers/bluetooth/btusb.c
drivers/bluetooth/hci_h4.c
drivers/clk/tegra/clk-tegra124.c
drivers/clk/ti/clk-43xx.c
drivers/clk/versatile/clk-vexpress-osc.c
drivers/clocksource/arm_arch_timer.c
drivers/clocksource/exynos_mct.c
drivers/clocksource/zevio-timer.c
drivers/connector/cn_proc.c
drivers/cpufreq/Kconfig.arm
drivers/cpufreq/longhaul.c
drivers/cpufreq/powernow-k6.c
drivers/cpufreq/powernow-k7.c
drivers/cpufreq/powernv-cpufreq.c
drivers/cpufreq/ppc-corenet-cpufreq.c
drivers/cpufreq/unicore2-cpufreq.c
drivers/dma/Kconfig
drivers/dma/edma.c
drivers/dma/fsl-edma.c
drivers/dma/sirf-dma.c
drivers/gpio/gpiolib-acpi.c
drivers/gpio/gpiolib.c
drivers/gpu/drm/exynos/exynos_drm_crtc.c
drivers/gpu/drm/exynos/exynos_drm_dmabuf.c
drivers/gpu/drm/exynos/exynos_drm_dsi.c
drivers/gpu/drm/exynos/exynos_drm_vidi.c
drivers/gpu/drm/i915/i915_gem_gtt.c
drivers/gpu/drm/i915/i915_irq.c
drivers/gpu/drm/i915/i915_reg.h
drivers/gpu/drm/i915/intel_display.c
drivers/gpu/drm/i915/intel_dp.c
drivers/gpu/drm/i915/intel_drv.h
drivers/gpu/drm/i915/intel_fbdev.c
drivers/gpu/drm/i915/intel_hdmi.c
drivers/gpu/drm/i915/intel_ringbuffer.c
drivers/gpu/drm/i915/intel_ringbuffer.h
drivers/gpu/drm/msm/mdp/mdp4/mdp4_crtc.c
drivers/gpu/drm/msm/mdp/mdp4/mdp4_irq.c
drivers/gpu/drm/msm/mdp/mdp4/mdp4_kms.c
drivers/gpu/drm/msm/mdp/mdp4/mdp4_kms.h
drivers/gpu/drm/msm/mdp/mdp5/mdp5_irq.c
drivers/gpu/drm/msm/msm_fbdev.c
drivers/gpu/drm/msm/msm_gem.c
drivers/gpu/drm/radeon/atombios_dp.c
drivers/gpu/drm/radeon/cik_sdma.c
drivers/gpu/drm/radeon/r600_dpm.c
drivers/gpu/drm/radeon/radeon_atpx_handler.c
drivers/gpu/drm/radeon/radeon_display.c
drivers/gpu/drm/radeon/radeon_kms.c
drivers/gpu/drm/radeon/radeon_pm.c
drivers/gpu/drm/tegra/dc.c
drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c
drivers/hwmon/coretemp.c
drivers/hwmon/ltc2945.c
drivers/hwmon/vexpress.c
drivers/idle/intel_idle.c
drivers/iio/adc/Kconfig
drivers/iio/adc/at91_adc.c
drivers/iio/adc/exynos_adc.c
drivers/iio/imu/inv_mpu6050/inv_mpu_core.c
drivers/iio/industrialio-buffer.c
drivers/iio/light/cm32181.c
drivers/iio/light/cm36651.c
drivers/infiniband/hw/cxgb4/Kconfig
drivers/infiniband/hw/cxgb4/cm.c
drivers/infiniband/hw/cxgb4/iw_cxgb4.h
drivers/infiniband/hw/cxgb4/qp.c
drivers/infiniband/hw/cxgb4/t4fw_ri_api.h
drivers/infiniband/ulp/ipoib/ipoib_ethtool.c
drivers/input/misc/da9055_onkey.c
drivers/input/misc/soc_button_array.c
drivers/input/mouse/elantech.c
drivers/input/mouse/synaptics.c
drivers/input/serio/i8042-x86ia64io.h
drivers/input/serio/i8042.c
drivers/input/serio/serio.c
drivers/input/tablet/wacom_sys.c
drivers/input/tablet/wacom_wac.c
drivers/input/touchscreen/ads7846.c
drivers/iommu/arm-smmu.c
drivers/iommu/dmar.c
drivers/iommu/intel-iommu.c
drivers/irqchip/irq-armada-370-xp.c
drivers/irqchip/irq-crossbar.c
drivers/irqchip/irq-gic.c
drivers/isdn/hisax/hfc4s8s_l1.c
drivers/isdn/hisax/icc.c
drivers/isdn/mISDN/l1oip_core.c
drivers/md/dm-cache-target.c
drivers/md/dm-thin.c
drivers/md/dm-verity.c
drivers/media/platform/Kconfig
drivers/mtd/ubi/block.c
drivers/mtd/ubi/wl.c
drivers/net/bonding/bond_3ad.c
drivers/net/bonding/bond_alb.c
drivers/net/bonding/bond_alb.h
drivers/net/bonding/bond_debugfs.c
drivers/net/bonding/bond_main.c
drivers/net/bonding/bond_netlink.c
drivers/net/bonding/bond_options.c
drivers/net/bonding/bond_options.h
drivers/net/bonding/bond_procfs.c
drivers/net/bonding/bond_sysfs.c
drivers/net/bonding/bond_sysfs_slave.c
drivers/net/bonding/bonding.h
drivers/net/can/Kconfig
drivers/net/can/Makefile
drivers/net/can/c_can/Kconfig
drivers/net/can/c_can/c_can.c
drivers/net/can/c_can/c_can.h
drivers/net/can/c_can/c_can_pci.c
drivers/net/can/c_can/c_can_platform.c
drivers/net/can/dev.c
drivers/net/can/mcp251x.c [deleted file]
drivers/net/can/mscan/Kconfig
drivers/net/can/rcar_can.c [new file with mode: 0644]
drivers/net/can/sja1000/sja1000_isa.c
drivers/net/can/slcan.c
drivers/net/can/softing/softing_main.c
drivers/net/can/spi/Kconfig [new file with mode: 0644]
drivers/net/can/spi/Makefile [new file with mode: 0644]
drivers/net/can/spi/mcp251x.c [new file with mode: 0644]
drivers/net/can/usb/Kconfig
drivers/net/can/usb/Makefile
drivers/net/can/usb/gs_usb.c [new file with mode: 0644]
drivers/net/can/usb/kvaser_usb.c
drivers/net/dsa/mv88e6123_61_65.c
drivers/net/dsa/mv88e6131.c
drivers/net/dsa/mv88e6xxx.c
drivers/net/ethernet/3com/3c509.c
drivers/net/ethernet/3com/3c589_cs.c
drivers/net/ethernet/3com/typhoon.c
drivers/net/ethernet/8390/ax88796.c
drivers/net/ethernet/Kconfig
drivers/net/ethernet/Makefile
drivers/net/ethernet/adaptec/starfire.c
drivers/net/ethernet/alteon/acenic.c
drivers/net/ethernet/altera/Kconfig
drivers/net/ethernet/altera/altera_msgdma.c
drivers/net/ethernet/altera/altera_msgdma.h
drivers/net/ethernet/altera/altera_sgdma.c
drivers/net/ethernet/altera/altera_sgdma.h
drivers/net/ethernet/altera/altera_tse.h
drivers/net/ethernet/altera/altera_tse_ethtool.c
drivers/net/ethernet/altera/altera_tse_main.c
drivers/net/ethernet/amd/amd8111e.c
drivers/net/ethernet/amd/ariadne.c
drivers/net/ethernet/amd/au1000_eth.c
drivers/net/ethernet/amd/nmclan_cs.c
drivers/net/ethernet/arc/emac.h
drivers/net/ethernet/arc/emac_main.c
drivers/net/ethernet/atheros/alx/main.c
drivers/net/ethernet/atheros/atl1c/atl1c_ethtool.c
drivers/net/ethernet/atheros/atl1e/atl1e_ethtool.c
drivers/net/ethernet/atheros/atlx/atl2.c
drivers/net/ethernet/broadcom/Kconfig
drivers/net/ethernet/broadcom/Makefile
drivers/net/ethernet/broadcom/b44.c
drivers/net/ethernet/broadcom/bcm63xx_enet.c
drivers/net/ethernet/broadcom/bcmsysport.c [new file with mode: 0644]
drivers/net/ethernet/broadcom/bcmsysport.h [new file with mode: 0644]
drivers/net/ethernet/broadcom/bgmac.c
drivers/net/ethernet/broadcom/bnx2x/bnx2x_ethtool.c
drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c
drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.c
drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.h
drivers/net/ethernet/broadcom/bnx2x/bnx2x_vfpf.c
drivers/net/ethernet/broadcom/genet/bcmgenet.c
drivers/net/ethernet/broadcom/genet/bcmmii.c
drivers/net/ethernet/broadcom/tg3.c
drivers/net/ethernet/broadcom/tg3.h
drivers/net/ethernet/brocade/bna/bnad_ethtool.c
drivers/net/ethernet/cadence/Kconfig
drivers/net/ethernet/cadence/macb.c
drivers/net/ethernet/calxeda/xgmac.c
drivers/net/ethernet/chelsio/Kconfig
drivers/net/ethernet/chelsio/cxgb/cxgb2.c
drivers/net/ethernet/chelsio/cxgb3/cxgb3_main.c
drivers/net/ethernet/chelsio/cxgb3/cxgb3_offload.c
drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c
drivers/net/ethernet/chelsio/cxgb4/sge.c
drivers/net/ethernet/chelsio/cxgb4vf/cxgb4vf_main.c
drivers/net/ethernet/chelsio/cxgb4vf/sge.c
drivers/net/ethernet/cisco/enic/enic.h
drivers/net/ethernet/cisco/enic/enic_ethtool.c
drivers/net/ethernet/cisco/enic/enic_main.c
drivers/net/ethernet/cisco/enic/vnic_cq.h
drivers/net/ethernet/dec/tulip/tulip_core.c
drivers/net/ethernet/dlink/dl2k.c
drivers/net/ethernet/dlink/sundance.c
drivers/net/ethernet/ec_bhf.c [new file with mode: 0644]
drivers/net/ethernet/emulex/benet/be.h
drivers/net/ethernet/emulex/benet/be_cmds.c
drivers/net/ethernet/emulex/benet/be_cmds.h
drivers/net/ethernet/emulex/benet/be_ethtool.c
drivers/net/ethernet/emulex/benet/be_main.c
drivers/net/ethernet/faraday/ftgmac100.c
drivers/net/ethernet/faraday/ftmac100.c
drivers/net/ethernet/freescale/fec_main.c
drivers/net/ethernet/freescale/fs_enet/fs_enet-main.c
drivers/net/ethernet/freescale/gianfar.c
drivers/net/ethernet/freescale/gianfar_ethtool.c
drivers/net/ethernet/freescale/ucc_geth.c
drivers/net/ethernet/freescale/ucc_geth_ethtool.c
drivers/net/ethernet/fujitsu/fmvj18x_cs.c
drivers/net/ethernet/ibm/ehea/ehea_ethtool.c
drivers/net/ethernet/ibm/ehea/ehea_qmr.c
drivers/net/ethernet/ibm/emac/core.c
drivers/net/ethernet/icplus/ipg.c
drivers/net/ethernet/intel/e100.c
drivers/net/ethernet/intel/e1000/e1000_ethtool.c
drivers/net/ethernet/intel/e1000e/e1000.h
drivers/net/ethernet/intel/e1000e/ethtool.c
drivers/net/ethernet/intel/e1000e/ich8lan.c
drivers/net/ethernet/intel/e1000e/ich8lan.h
drivers/net/ethernet/intel/e1000e/netdev.c
drivers/net/ethernet/intel/e1000e/nvm.c
drivers/net/ethernet/intel/e1000e/param.c
drivers/net/ethernet/intel/e1000e/phy.c
drivers/net/ethernet/intel/e1000e/phy.h
drivers/net/ethernet/intel/i40e/i40e.h
drivers/net/ethernet/intel/i40e/i40e_adminq.c
drivers/net/ethernet/intel/i40e/i40e_adminq.h
drivers/net/ethernet/intel/i40e/i40e_adminq_cmd.h
drivers/net/ethernet/intel/i40e/i40e_common.c
drivers/net/ethernet/intel/i40e/i40e_debugfs.c
drivers/net/ethernet/intel/i40e/i40e_ethtool.c
drivers/net/ethernet/intel/i40e/i40e_lan_hmc.c
drivers/net/ethernet/intel/i40e/i40e_lan_hmc.h
drivers/net/ethernet/intel/i40e/i40e_main.c
drivers/net/ethernet/intel/i40e/i40e_nvm.c
drivers/net/ethernet/intel/i40e/i40e_prototype.h
drivers/net/ethernet/intel/i40e/i40e_ptp.c
drivers/net/ethernet/intel/i40e/i40e_txrx.c
drivers/net/ethernet/intel/i40e/i40e_type.h
drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c
drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.h
drivers/net/ethernet/intel/i40evf/Makefile
drivers/net/ethernet/intel/i40evf/i40e_adminq.c
drivers/net/ethernet/intel/i40evf/i40e_adminq.h
drivers/net/ethernet/intel/i40evf/i40e_adminq_cmd.h
drivers/net/ethernet/intel/i40evf/i40e_alloc.h
drivers/net/ethernet/intel/i40evf/i40e_common.c
drivers/net/ethernet/intel/i40evf/i40e_hmc.h
drivers/net/ethernet/intel/i40evf/i40e_lan_hmc.h
drivers/net/ethernet/intel/i40evf/i40e_osdep.h
drivers/net/ethernet/intel/i40evf/i40e_prototype.h
drivers/net/ethernet/intel/i40evf/i40e_register.h
drivers/net/ethernet/intel/i40evf/i40e_status.h
drivers/net/ethernet/intel/i40evf/i40e_txrx.c
drivers/net/ethernet/intel/i40evf/i40e_txrx.h
drivers/net/ethernet/intel/i40evf/i40e_type.h
drivers/net/ethernet/intel/i40evf/i40e_virtchnl.h
drivers/net/ethernet/intel/i40evf/i40evf.h
drivers/net/ethernet/intel/i40evf/i40evf_ethtool.c
drivers/net/ethernet/intel/i40evf/i40evf_main.c
drivers/net/ethernet/intel/i40evf/i40evf_virtchnl.c
drivers/net/ethernet/intel/igb/e1000_82575.c
drivers/net/ethernet/intel/igb/e1000_82575.h
drivers/net/ethernet/intel/igb/e1000_defines.h
drivers/net/ethernet/intel/igb/e1000_hw.h
drivers/net/ethernet/intel/igb/e1000_i210.c
drivers/net/ethernet/intel/igb/e1000_i210.h
drivers/net/ethernet/intel/igb/e1000_mac.c
drivers/net/ethernet/intel/igb/e1000_mac.h
drivers/net/ethernet/intel/igb/e1000_mbx.c
drivers/net/ethernet/intel/igb/e1000_mbx.h
drivers/net/ethernet/intel/igb/e1000_nvm.c
drivers/net/ethernet/intel/igb/e1000_nvm.h
drivers/net/ethernet/intel/igb/e1000_phy.c
drivers/net/ethernet/intel/igb/e1000_phy.h
drivers/net/ethernet/intel/igb/e1000_regs.h
drivers/net/ethernet/intel/igb/igb.h
drivers/net/ethernet/intel/igb/igb_ethtool.c
drivers/net/ethernet/intel/igb/igb_hwmon.c
drivers/net/ethernet/intel/igb/igb_main.c
drivers/net/ethernet/intel/igb/igb_ptp.c
drivers/net/ethernet/intel/igbvf/ethtool.c
drivers/net/ethernet/intel/ixgb/ixgb_ethtool.c
drivers/net/ethernet/intel/ixgbe/ixgbe.h
drivers/net/ethernet/intel/ixgbe/ixgbe_82598.c
drivers/net/ethernet/intel/ixgbe/ixgbe_common.c
drivers/net/ethernet/intel/ixgbe/ixgbe_common.h
drivers/net/ethernet/intel/ixgbe/ixgbe_dcb_82598.c
drivers/net/ethernet/intel/ixgbe/ixgbe_dcb_82599.c
drivers/net/ethernet/intel/ixgbe/ixgbe_ethtool.c
drivers/net/ethernet/intel/ixgbe/ixgbe_fcoe.h
drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
drivers/net/ethernet/intel/ixgbe/ixgbe_phy.c
drivers/net/ethernet/intel/ixgbe/ixgbe_ptp.c
drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.c
drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.h
drivers/net/ethernet/intel/ixgbe/ixgbe_type.h
drivers/net/ethernet/intel/ixgbevf/ethtool.c
drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c
drivers/net/ethernet/jme.c
drivers/net/ethernet/marvell/mv643xx_eth.c
drivers/net/ethernet/marvell/mvmdio.c
drivers/net/ethernet/marvell/mvneta.c
drivers/net/ethernet/marvell/pxa168_eth.c
drivers/net/ethernet/marvell/sky2.c
drivers/net/ethernet/mellanox/mlx4/cmd.c
drivers/net/ethernet/mellanox/mlx4/cq.c
drivers/net/ethernet/mellanox/mlx4/en_cq.c
drivers/net/ethernet/mellanox/mlx4/en_ethtool.c
drivers/net/ethernet/mellanox/mlx4/en_main.c
drivers/net/ethernet/mellanox/mlx4/en_netdev.c
drivers/net/ethernet/mellanox/mlx4/en_rx.c
drivers/net/ethernet/mellanox/mlx4/en_tx.c
drivers/net/ethernet/mellanox/mlx4/eq.c
drivers/net/ethernet/mellanox/mlx4/fw.c
drivers/net/ethernet/mellanox/mlx4/main.c
drivers/net/ethernet/mellanox/mlx4/mcg.c
drivers/net/ethernet/mellanox/mlx4/mlx4.h
drivers/net/ethernet/mellanox/mlx4/mlx4_en.h
drivers/net/ethernet/mellanox/mlx4/mr.c
drivers/net/ethernet/mellanox/mlx4/port.c
drivers/net/ethernet/mellanox/mlx4/profile.c
drivers/net/ethernet/mellanox/mlx4/qp.c
drivers/net/ethernet/mellanox/mlx4/reset.c
drivers/net/ethernet/mellanox/mlx4/resource_tracker.c
drivers/net/ethernet/mellanox/mlx5/core/cmd.c
drivers/net/ethernet/mellanox/mlx5/core/eq.c
drivers/net/ethernet/mellanox/mlx5/core/main.c
drivers/net/ethernet/mellanox/mlx5/core/mlx5_core.h
drivers/net/ethernet/mellanox/mlx5/core/mr.c
drivers/net/ethernet/mellanox/mlx5/core/pagealloc.c
drivers/net/ethernet/mellanox/mlx5/core/qp.c
drivers/net/ethernet/micrel/ks8695net.c
drivers/net/ethernet/micrel/ks8851.c
drivers/net/ethernet/micrel/ksz884x.c
drivers/net/ethernet/microchip/enc28j60.c
drivers/net/ethernet/myricom/myri10ge/myri10ge.c
drivers/net/ethernet/natsemi/natsemi.c
drivers/net/ethernet/natsemi/ns83820.c
drivers/net/ethernet/neterion/s2io.c
drivers/net/ethernet/neterion/vxge/vxge-ethtool.c
drivers/net/ethernet/neterion/vxge/vxge-main.c
drivers/net/ethernet/nvidia/forcedeth.c
drivers/net/ethernet/oki-semi/pch_gbe/Kconfig
drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_ethtool.c
drivers/net/ethernet/packetengines/hamachi.c
drivers/net/ethernet/packetengines/yellowfin.c
drivers/net/ethernet/qlogic/Kconfig
drivers/net/ethernet/qlogic/netxen/netxen_nic_main.c
drivers/net/ethernet/qlogic/qla3xxx.c
drivers/net/ethernet/qlogic/qlcnic/qlcnic.h
drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_hw.c
drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_hw.h
drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_init.c
drivers/net/ethernet/qlogic/qlcnic/qlcnic_ctx.c
drivers/net/ethernet/qlogic/qlcnic/qlcnic_hw.c
drivers/net/ethernet/qlogic/qlcnic/qlcnic_io.c
drivers/net/ethernet/qlogic/qlcnic/qlcnic_main.c
drivers/net/ethernet/qlogic/qlcnic/qlcnic_minidump.c
drivers/net/ethernet/qlogic/qlcnic/qlcnic_sriov.h
drivers/net/ethernet/qlogic/qlcnic/qlcnic_sriov_common.c
drivers/net/ethernet/qlogic/qlcnic/qlcnic_sriov_pf.c
drivers/net/ethernet/qlogic/qlcnic/qlcnic_sysfs.c
drivers/net/ethernet/qlogic/qlge/qlge_main.c
drivers/net/ethernet/realtek/r8169.c
drivers/net/ethernet/renesas/sh_eth.c
drivers/net/ethernet/samsung/sxgbe/sxgbe_common.h
drivers/net/ethernet/samsung/sxgbe/sxgbe_core.c
drivers/net/ethernet/samsung/sxgbe/sxgbe_desc.c
drivers/net/ethernet/samsung/sxgbe/sxgbe_desc.h
drivers/net/ethernet/samsung/sxgbe/sxgbe_dma.c
drivers/net/ethernet/samsung/sxgbe/sxgbe_ethtool.c
drivers/net/ethernet/samsung/sxgbe/sxgbe_main.c
drivers/net/ethernet/samsung/sxgbe/sxgbe_mdio.c
drivers/net/ethernet/samsung/sxgbe/sxgbe_reg.h
drivers/net/ethernet/sfc/efx.c
drivers/net/ethernet/sis/sis190.c
drivers/net/ethernet/smsc/smc91c92_cs.c
drivers/net/ethernet/smsc/smc91x.c
drivers/net/ethernet/stmicro/stmmac/stmmac_ethtool.c
drivers/net/ethernet/stmicro/stmmac/stmmac_mdio.c
drivers/net/ethernet/sun/cassini.c
drivers/net/ethernet/tehuti/tehuti.c
drivers/net/ethernet/ti/cpsw-phy-sel.c
drivers/net/ethernet/ti/cpsw.c
drivers/net/ethernet/ti/cpts.c
drivers/net/ethernet/ti/davinci_cpdma.c
drivers/net/ethernet/ti/davinci_emac.c
drivers/net/ethernet/ti/davinci_mdio.c
drivers/net/ethernet/via/Kconfig
drivers/net/ethernet/via/via-rhine.c
drivers/net/ethernet/xilinx/ll_temac_main.c
drivers/net/ethernet/xilinx/xilinx_axienet_mdio.c
drivers/net/ethernet/xilinx/xilinx_emaclite.c
drivers/net/hyperv/hyperv_net.h
drivers/net/hyperv/netvsc.c
drivers/net/hyperv/netvsc_drv.c
drivers/net/hyperv/rndis_filter.c
drivers/net/ieee802154/at86rf230.c
drivers/net/ieee802154/fakelb.c
drivers/net/irda/Kconfig
drivers/net/irda/w83977af_ir.c
drivers/net/macvlan.c
drivers/net/macvtap.c
drivers/net/ntb_netdev.c
drivers/net/phy/at803x.c
drivers/net/phy/fixed.c
drivers/net/phy/mdio-gpio.c
drivers/net/phy/mdio_bus.c
drivers/net/phy/micrel.c
drivers/net/phy/phy.c
drivers/net/phy/phy_device.c
drivers/net/phy/smsc.c
drivers/net/phy/vitesse.c
drivers/net/rionet.c
drivers/net/slip/slip.c
drivers/net/team/team.c
drivers/net/tun.c
drivers/net/usb/catc.c
drivers/net/usb/cdc_mbim.c
drivers/net/usb/cdc_ncm.c
drivers/net/usb/hso.c
drivers/net/usb/huawei_cdc_ncm.c
drivers/net/usb/ipheth.c
drivers/net/usb/kaweth.c
drivers/net/usb/pegasus.c
drivers/net/usb/qmi_wwan.c
drivers/net/usb/r8152.c
drivers/net/usb/rtl8150.c
drivers/net/virtio_net.c
drivers/net/vmxnet3/vmxnet3_ethtool.c
drivers/net/vxlan.c
drivers/net/wan/sdla.c
drivers/net/wimax/i2400m/driver.c
drivers/net/wireless/ath/ar5523/ar5523.c
drivers/net/wireless/ath/ath10k/bmi.c
drivers/net/wireless/ath/ath10k/bmi.h
drivers/net/wireless/ath/ath10k/ce.c
drivers/net/wireless/ath/ath10k/ce.h
drivers/net/wireless/ath/ath10k/core.c
drivers/net/wireless/ath/ath10k/core.h
drivers/net/wireless/ath/ath10k/debug.c
drivers/net/wireless/ath/ath10k/htc.c
drivers/net/wireless/ath/ath10k/htt.h
drivers/net/wireless/ath/ath10k/htt_rx.c
drivers/net/wireless/ath/ath10k/hw.h
drivers/net/wireless/ath/ath10k/mac.c
drivers/net/wireless/ath/ath10k/pci.c
drivers/net/wireless/ath/ath10k/pci.h
drivers/net/wireless/ath/ath10k/txrx.c
drivers/net/wireless/ath/ath10k/txrx.h
drivers/net/wireless/ath/ath10k/wmi.c
drivers/net/wireless/ath/ath10k/wmi.h
drivers/net/wireless/ath/ath5k/phy.c
drivers/net/wireless/ath/ath6kl/Kconfig
drivers/net/wireless/ath/ath6kl/cfg80211.c
drivers/net/wireless/ath/ath6kl/core.c
drivers/net/wireless/ath/ath6kl/debug.c
drivers/net/wireless/ath/ath6kl/debug.h
drivers/net/wireless/ath/ath6kl/hif.c
drivers/net/wireless/ath/ath6kl/hif.h
drivers/net/wireless/ath/ath6kl/htc_mbox.c
drivers/net/wireless/ath/ath6kl/htc_pipe.c
drivers/net/wireless/ath/ath6kl/init.c
drivers/net/wireless/ath/ath6kl/main.c
drivers/net/wireless/ath/ath6kl/sdio.c
drivers/net/wireless/ath/ath6kl/target.h
drivers/net/wireless/ath/ath6kl/txrx.c
drivers/net/wireless/ath/ath6kl/usb.c
drivers/net/wireless/ath/ath6kl/wmi.c
drivers/net/wireless/ath/ath6kl/wmi.h
drivers/net/wireless/ath/ath9k/ahb.c
drivers/net/wireless/ath/ath9k/ani.c
drivers/net/wireless/ath/ath9k/ar9003_2p2_initvals.h
drivers/net/wireless/ath/ath9k/ar9330_1p1_initvals.h
drivers/net/wireless/ath/ath9k/ar9330_1p2_initvals.h
drivers/net/wireless/ath/ath9k/ar9340_initvals.h
drivers/net/wireless/ath/ath9k/ar953x_initvals.h
drivers/net/wireless/ath/ath9k/ar9580_1p0_initvals.h
drivers/net/wireless/ath/ath9k/ath9k.h
drivers/net/wireless/ath/ath9k/debug_sta.c
drivers/net/wireless/ath/ath9k/init.c
drivers/net/wireless/ath/ath9k/main.c
drivers/net/wireless/ath/ath9k/pci.c
drivers/net/wireless/ath/ath9k/recv.c
drivers/net/wireless/ath/ath9k/xmit.c
drivers/net/wireless/ath/carl9170/main.c
drivers/net/wireless/ath/wil6210/interrupt.c
drivers/net/wireless/ath/wil6210/main.c
drivers/net/wireless/ath/wil6210/pcie_bus.c
drivers/net/wireless/ath/wil6210/rx_reorder.c
drivers/net/wireless/ath/wil6210/wil6210.h
drivers/net/wireless/ath/wil6210/wmi.c
drivers/net/wireless/ath/wil6210/wmi.h
drivers/net/wireless/b43/b43.h
drivers/net/wireless/b43/main.c
drivers/net/wireless/b43/phy_common.c
drivers/net/wireless/b43/phy_g.c
drivers/net/wireless/b43/phy_n.c
drivers/net/wireless/b43/radio_2056.c
drivers/net/wireless/b43/tables_nphy.c
drivers/net/wireless/b43/tables_nphy.h
drivers/net/wireless/b43/wa.c
drivers/net/wireless/brcm80211/brcmfmac/chip.c
drivers/net/wireless/brcm80211/brcmsmac/mac80211_if.c
drivers/net/wireless/cw1200/sta.c
drivers/net/wireless/cw1200/sta.h
drivers/net/wireless/hostap/hostap_main.c
drivers/net/wireless/iwlegacy/3945.c
drivers/net/wireless/iwlegacy/4965-mac.c
drivers/net/wireless/iwlegacy/common.c
drivers/net/wireless/iwlegacy/common.h
drivers/net/wireless/iwlwifi/dvm/lib.c
drivers/net/wireless/iwlwifi/dvm/mac80211.c
drivers/net/wireless/iwlwifi/dvm/main.c
drivers/net/wireless/iwlwifi/iwl-1000.c
drivers/net/wireless/iwlwifi/iwl-2000.c
drivers/net/wireless/iwlwifi/iwl-5000.c
drivers/net/wireless/iwlwifi/iwl-6000.c
drivers/net/wireless/iwlwifi/iwl-7000.c
drivers/net/wireless/iwlwifi/iwl-config.h
drivers/net/wireless/iwlwifi/iwl-fw.h
drivers/net/wireless/iwlwifi/iwl-nvm-parse.c
drivers/net/wireless/iwlwifi/iwl-op-mode.h
drivers/net/wireless/iwlwifi/iwl-prph.h
drivers/net/wireless/iwlwifi/iwl-trans.h
drivers/net/wireless/iwlwifi/mvm/coex.c
drivers/net/wireless/iwlwifi/mvm/d3.c
drivers/net/wireless/iwlwifi/mvm/debugfs-vif.c
drivers/net/wireless/iwlwifi/mvm/debugfs.c
drivers/net/wireless/iwlwifi/mvm/fw-api-coex.h
drivers/net/wireless/iwlwifi/mvm/fw-api-d3.h
drivers/net/wireless/iwlwifi/mvm/fw-api-rs.h
drivers/net/wireless/iwlwifi/mvm/fw-api-scan.h
drivers/net/wireless/iwlwifi/mvm/fw-api-sta.h
drivers/net/wireless/iwlwifi/mvm/fw-api-tx.h
drivers/net/wireless/iwlwifi/mvm/fw-api.h
drivers/net/wireless/iwlwifi/mvm/fw-error-dump.h
drivers/net/wireless/iwlwifi/mvm/fw.c
drivers/net/wireless/iwlwifi/mvm/mac-ctxt.c
drivers/net/wireless/iwlwifi/mvm/mac80211.c
drivers/net/wireless/iwlwifi/mvm/mvm.h
drivers/net/wireless/iwlwifi/mvm/ops.c
drivers/net/wireless/iwlwifi/mvm/power.c
drivers/net/wireless/iwlwifi/mvm/rs.c
drivers/net/wireless/iwlwifi/mvm/rs.h
drivers/net/wireless/iwlwifi/mvm/rx.c
drivers/net/wireless/iwlwifi/mvm/scan.c
drivers/net/wireless/iwlwifi/mvm/sf.c
drivers/net/wireless/iwlwifi/mvm/sta.c
drivers/net/wireless/iwlwifi/mvm/sta.h
drivers/net/wireless/iwlwifi/mvm/time-event.c
drivers/net/wireless/iwlwifi/mvm/tt.c
drivers/net/wireless/iwlwifi/mvm/tx.c
drivers/net/wireless/iwlwifi/mvm/utils.c
drivers/net/wireless/iwlwifi/pcie/internal.h
drivers/net/wireless/iwlwifi/pcie/rx.c
drivers/net/wireless/iwlwifi/pcie/trans.c
drivers/net/wireless/iwlwifi/pcie/tx.c
drivers/net/wireless/mac80211_hwsim.c
drivers/net/wireless/mwifiex/11ac.c
drivers/net/wireless/mwifiex/11n.c
drivers/net/wireless/mwifiex/11n_aggr.c
drivers/net/wireless/mwifiex/README
drivers/net/wireless/mwifiex/cmdevt.c
drivers/net/wireless/mwifiex/debugfs.c
drivers/net/wireless/mwifiex/decl.h
drivers/net/wireless/mwifiex/ioctl.h
drivers/net/wireless/mwifiex/main.c
drivers/net/wireless/mwifiex/main.h
drivers/net/wireless/mwifiex/pcie.c
drivers/net/wireless/mwifiex/scan.c
drivers/net/wireless/mwifiex/sdio.c
drivers/net/wireless/mwifiex/sdio.h
drivers/net/wireless/mwifiex/uap_cmd.c
drivers/net/wireless/mwifiex/usb.c
drivers/net/wireless/mwifiex/wmm.c
drivers/net/wireless/p54/main.c
drivers/net/wireless/ray_cs.c
drivers/net/wireless/rsi/rsi_91x_mgmt.c
drivers/net/wireless/rsi/rsi_mgmt.h
drivers/net/wireless/rt2x00/rt2800lib.c
drivers/net/wireless/rt2x00/rt2x00.h
drivers/net/wireless/rt2x00/rt2x00mac.c
drivers/net/wireless/rt2x00/rt61pci.c
drivers/net/wireless/rt2x00/rt73usb.c
drivers/net/wireless/rtl818x/rtl8180/Makefile
drivers/net/wireless/rtl818x/rtl8180/dev.c
drivers/net/wireless/rtl818x/rtl818x.h
drivers/net/wireless/rtlwifi/core.c
drivers/net/wireless/rtlwifi/rtl8188ee/sw.c
drivers/net/wireless/rtlwifi/rtl8188ee/trx.c
drivers/net/wireless/rtlwifi/rtl8192cu/hw.c
drivers/net/wireless/rtlwifi/rtl8192se/trx.c
drivers/net/wireless/rtlwifi/rtl8723be/sw.c
drivers/net/wireless/ti/wlcore/main.c
drivers/net/wireless/ti/wlcore/sdio.c
drivers/net/wireless/ti/wlcore/spi.c
drivers/net/xen-netback/interface.c
drivers/net/xen-netfront.c
drivers/of/irq.c
drivers/of/of_mdio.c
drivers/of/platform.c
drivers/of/selftest.c
drivers/of/testcase-data/tests-interrupts.dtsi
drivers/phy/Kconfig
drivers/phy/Makefile
drivers/phy/phy-core.c
drivers/pinctrl/pinctrl-as3722.c
drivers/pinctrl/pinctrl-single.c
drivers/pinctrl/pinctrl-tb10x.c
drivers/pinctrl/sh-pfc/pfc-r8a7790.c
drivers/pinctrl/sh-pfc/pfc-r8a7791.c
drivers/pnp/pnpacpi/core.c
drivers/pnp/quirks.c
drivers/power/reset/vexpress-poweroff.c
drivers/ptp/Kconfig
drivers/ptp/ptp_clock.c
drivers/regulator/pbias-regulator.c
drivers/s390/cio/chsc.c
drivers/s390/net/claw.c
drivers/s390/net/ctcm_main.c
drivers/s390/net/lcs.c
drivers/s390/net/qeth_core.h
drivers/s390/net/qeth_core_main.c
drivers/s390/net/qeth_core_sys.c
drivers/s390/net/qeth_l2_main.c
drivers/s390/net/qeth_l3_main.c
drivers/scsi/hpsa.c
drivers/scsi/mpt2sas/mpt2sas_scsih.c
drivers/scsi/scsi_error.c
drivers/scsi/scsi_lib.c
drivers/scsi/scsi_netlink.c
drivers/scsi/virtio_scsi.c
drivers/spi/spi-atmel.c
drivers/spi/spi-bfin5xx.c
drivers/spi/spi-sh-hspi.c
drivers/spi/spi-sirf.c
drivers/staging/comedi/drivers/usbdux.c
drivers/staging/et131x/et131x.c
drivers/staging/ft1000/ft1000-pcmcia/ft1000_hw.c
drivers/staging/iio/adc/mxs-lradc.c
drivers/staging/iio/resolver/ad2s1200.c
drivers/staging/netlogic/xlr_net.c
drivers/staging/octeon/ethernet.c
drivers/staging/rtl8821ae/core.c
drivers/tty/hvc/hvc_console.c
drivers/tty/n_tty.c
drivers/tty/serial/8250/8250_core.c
drivers/tty/serial/8250/8250_dma.c
drivers/tty/serial/samsung.c
drivers/tty/serial/serial_core.c
drivers/tty/tty_buffer.c
drivers/usb/chipidea/core.c
drivers/usb/dwc3/core.c
drivers/usb/dwc3/gadget.c
drivers/usb/gadget/at91_udc.c
drivers/usb/gadget/f_fs.c
drivers/usb/gadget/f_rndis.c
drivers/usb/gadget/fsl_udc_core.c
drivers/usb/gadget/inode.c
drivers/usb/gadget/rndis.c
drivers/usb/gadget/u_ether.c
drivers/usb/gadget/zero.c
drivers/usb/host/ehci-fsl.c
drivers/usb/host/ohci-hub.c
drivers/usb/host/ohci-pci.c
drivers/usb/host/ohci.h
drivers/usb/host/xhci-pci.c
drivers/usb/host/xhci-ring.c
drivers/usb/host/xhci.c
drivers/usb/host/xhci.h
drivers/usb/musb/musb_dsps.c
drivers/usb/musb/omap2430.c
drivers/usb/phy/phy-am335x-control.c
drivers/usb/phy/phy-fsm-usb.c
drivers/usb/phy/phy.c
drivers/usb/serial/io_ti.c
drivers/usb/serial/option.c
drivers/usb/serial/qcserial.c
drivers/usb/serial/usb-serial.c
drivers/usb/storage/shuttle_usbat.c
drivers/usb/storage/unusual_devs.h
drivers/usb/wusbcore/mmc.c
drivers/usb/wusbcore/wa-xfer.c
drivers/uwb/drp.c
fs/aio.c
fs/btrfs/ctree.h
fs/btrfs/disk-io.c
fs/btrfs/extent-tree.c
fs/btrfs/file.c
fs/btrfs/inode-map.c
fs/btrfs/ioctl.c
fs/btrfs/send.c
fs/btrfs/super.c
fs/ceph/caps.c
fs/ceph/dir.c
fs/ceph/file.c
fs/ceph/inode.c
fs/ceph/ioctl.c
fs/ceph/locks.c
fs/ceph/super.h
fs/compat.c
fs/coredump.c
fs/ext4/balloc.c
fs/ext4/ext4.h
fs/ext4/extents.c
fs/ext4/extents_status.c
fs/ext4/file.c
fs/ext4/inode.c
fs/ext4/mballoc.c
fs/ext4/page-io.c
fs/ext4/super.c
fs/ext4/xattr.c
fs/fcntl.c
fs/kernfs/dir.c
fs/kernfs/file.c
fs/locks.c
fs/nfsd/nfs4callback.c
fs/nfsd/nfs4xdr.c
fs/open.c
fs/ubifs/super.c
fs/xfs/xfs_file.c
include/asm-generic/fixmap.h
include/asm-generic/word-at-a-time.h
include/dt-bindings/clock/tegra124-car.h
include/linux/can/core.h
include/linux/can/dev.h
include/linux/can/led.h
include/linux/can/platform/cc770.h
include/linux/can/platform/mcp251x.h
include/linux/can/platform/rcar_can.h [new file with mode: 0644]
include/linux/can/platform/sja1000.h
include/linux/can/platform/ti_hecc.h
include/linux/can/skb.h
include/linux/ethtool.h
include/linux/filter.h
include/linux/fs.h
include/linux/ftrace.h
include/linux/if_vlan.h
include/linux/interrupt.h
include/linux/irq.h
include/linux/libata.h
include/linux/mlx4/device.h
include/linux/netdevice.h
include/linux/netlink.h
include/linux/nl802154.h
include/linux/of_irq.h
include/linux/of_mdio.h
include/linux/phy.h
include/linux/phy/phy.h
include/linux/phy_fixed.h
include/linux/regulator/consumer.h
include/linux/serio.h
include/linux/skbuff.h
include/linux/sock_diag.h
include/linux/spi/at86rf230.h
include/linux/tcp.h
include/linux/usb/cdc_ncm.h
include/net/addrconf.h
include/net/af_ieee802154.h
include/net/af_vsock.h
include/net/bluetooth/hci.h
include/net/bluetooth/hci_core.h
include/net/cfg80211.h
include/net/checksum.h
include/net/dsa.h
include/net/ieee802154.h
include/net/ieee802154_netdev.h
include/net/inet_ecn.h
include/net/inet_hashtables.h
include/net/inet_sock.h
include/net/ip.h
include/net/ip6_checksum.h
include/net/ip6_route.h
include/net/ipv6.h
include/net/mac80211.h
include/net/net_namespace.h
include/net/netfilter/nf_tables.h
include/net/netfilter/nft_meta.h [new file with mode: 0644]
include/net/netns/ipv4.h
include/net/netns/ipv6.h
include/net/pkt_cls.h
include/net/regulatory.h
include/net/sch_generic.h
include/net/snmp.h
include/net/sock.h
include/net/tcp.h
include/net/tso.h [new file with mode: 0644]
include/net/vxlan.h
include/trace/events/ext4.h
include/trace/events/module.h
include/uapi/asm-generic/fcntl.h
include/uapi/linux/audit.h
include/uapi/linux/can.h
include/uapi/linux/can/bcm.h
include/uapi/linux/can/error.h
include/uapi/linux/can/gw.h
include/uapi/linux/can/netlink.h
include/uapi/linux/can/raw.h
include/uapi/linux/capability.h
include/uapi/linux/ethtool.h
include/uapi/linux/filter.h
include/uapi/linux/if_fddi.h
include/uapi/linux/input.h
include/uapi/linux/netfilter/nf_tables.h
include/uapi/linux/nl80211.h
include/uapi/linux/tipc.h
include/uapi/linux/tipc_config.h
kernel/audit.c
kernel/hrtimer.c
kernel/irq/irqdesc.c
kernel/irq/manage.c
kernel/module.c
kernel/power/suspend.c
kernel/seccomp.c
kernel/softirq.c
kernel/sysctl.c
kernel/timer.c
kernel/trace/ftrace.c
kernel/trace/trace_events_trigger.c
lib/Kconfig.debug
lib/Makefile
lib/test_bpf.c [new file with mode: 0644]
mm/memory.c
mm/vmacache.c
net/8021q/vlan_core.c
net/8021q/vlan_dev.c
net/batman-adv/debugfs.c
net/batman-adv/distributed-arp-table.c
net/batman-adv/main.h
net/batman-adv/network-coding.c
net/batman-adv/soft-interface.c
net/batman-adv/sysfs.c
net/bluetooth/hci_conn.c
net/bluetooth/hci_core.c
net/bluetooth/hci_event.c
net/bluetooth/hci_sock.c
net/bluetooth/lib.c
net/bluetooth/mgmt.c
net/bridge/Makefile
net/bridge/br.c
net/bridge/br_device.c
net/bridge/br_fdb.c
net/bridge/br_if.c
net/bridge/br_netfilter.c
net/bridge/br_netlink.c
net/bridge/br_notify.c [deleted file]
net/bridge/br_private.h
net/bridge/br_sysfs_if.c
net/bridge/br_vlan.c
net/bridge/netfilter/Kconfig
net/bridge/netfilter/Makefile
net/bridge/netfilter/nft_meta_bridge.c [new file with mode: 0644]
net/can/af_can.c
net/can/af_can.h
net/can/gw.c
net/can/proc.c
net/ceph/osdmap.c
net/core/Makefile
net/core/dev.c
net/core/ethtool.c
net/core/filter.c
net/core/net_namespace.c
net/core/pktgen.c
net/core/ptp_classifier.c
net/core/rtnetlink.c
net/core/skbuff.c
net/core/sock.c
net/core/sock_diag.c
net/core/tso.c [new file with mode: 0644]
net/dcb/dcbnl.c
net/dccp/proto.c
net/dccp/sysctl.c
net/dccp/timer.c
net/decnet/dn_dev.c
net/decnet/dn_fib.c
net/decnet/netfilter/dn_rtmsg.c
net/dsa/slave.c
net/ieee802154/6lowpan_rtnl.c
net/ieee802154/dgram.c
net/ieee802154/header_ops.c
net/ieee802154/ieee802154.h
net/ieee802154/netlink.c
net/ieee802154/nl-mac.c
net/ieee802154/nl_policy.c
net/ipv4/af_inet.c
net/ipv4/devinet.c
net/ipv4/fib_semantics.c
net/ipv4/gre_demux.c
net/ipv4/icmp.c
net/ipv4/igmp.c
net/ipv4/inet_connection_sock.c
net/ipv4/inet_hashtables.c
net/ipv4/inetpeer.c
net/ipv4/ip_forward.c
net/ipv4/ip_fragment.c
net/ipv4/ip_gre.c
net/ipv4/ip_options.c
net/ipv4/ip_output.c
net/ipv4/ip_tunnel.c
net/ipv4/ipip.c
net/ipv4/netfilter/nf_defrag_ipv4.c
net/ipv4/ping.c
net/ipv4/proc.c
net/ipv4/route.c
net/ipv4/syncookies.c
net/ipv4/sysctl_net_ipv4.c
net/ipv4/tcp_bic.c
net/ipv4/tcp_cong.c
net/ipv4/tcp_cubic.c
net/ipv4/tcp_fastopen.c
net/ipv4/tcp_highspeed.c
net/ipv4/tcp_htcp.c
net/ipv4/tcp_hybla.c
net/ipv4/tcp_illinois.c
net/ipv4/tcp_input.c
net/ipv4/tcp_ipv4.c
net/ipv4/tcp_lp.c
net/ipv4/tcp_minisocks.c
net/ipv4/tcp_output.c
net/ipv4/tcp_scalable.c
net/ipv4/tcp_vegas.c
net/ipv4/tcp_veno.c
net/ipv4/tcp_yeah.c
net/ipv4/udp.c
net/ipv4/xfrm4_output.c
net/ipv6/addrconf.c
net/ipv6/addrconf_core.c
net/ipv6/af_inet6.c
net/ipv6/icmp.c
net/ipv6/inet6_connection_sock.c
net/ipv6/ip6_checksum.c
net/ipv6/ip6_fib.c
net/ipv6/ip6_flowlabel.c
net/ipv6/ip6_gre.c
net/ipv6/ip6_output.c
net/ipv6/ip6_tunnel.c
net/ipv6/ip6_vti.c
net/ipv6/ip6mr.c
net/ipv6/netfilter.c
net/ipv6/netfilter/ip6t_rpfilter.c
net/ipv6/netfilter/nf_conntrack_reasm.c
net/ipv6/ping.c
net/ipv6/proc.c
net/ipv6/raw.c
net/ipv6/route.c
net/ipv6/sit.c
net/ipv6/syncookies.c
net/ipv6/sysctl_net_ipv6.c
net/ipv6/tcp_ipv6.c
net/ipv6/udp.c
net/ipv6/xfrm6_output.c
net/l2tp/l2tp_core.c
net/l2tp/l2tp_ip6.c
net/mac80211/aes_ccm.c
net/mac80211/cfg.c
net/mac80211/chan.c
net/mac80211/debugfs.c
net/mac80211/debugfs.h
net/mac80211/debugfs_netdev.h
net/mac80211/driver-ops.h
net/mac80211/ht.c
net/mac80211/ibss.c
net/mac80211/ieee80211_i.h
net/mac80211/iface.c
net/mac80211/main.c
net/mac80211/mesh.c
net/mac80211/mesh_hwmp.c
net/mac80211/michael.h
net/mac80211/mlme.c
net/mac80211/rx.c
net/mac80211/scan.c
net/mac80211/sta_info.c
net/mac80211/status.c
net/mac80211/util.c
net/mac80211/wpa.c
net/mac802154/Kconfig
net/mac802154/Makefile
net/mac802154/llsec.c [new file with mode: 0644]
net/mac802154/llsec.h [new file with mode: 0644]
net/mac802154/mac802154.h
net/mac802154/mac_cmd.c
net/mac802154/mib.c
net/mac802154/rx.c
net/mac802154/wpan.c
net/netfilter/ipvs/ip_vs_xmit.c
net/netfilter/nf_conntrack_netlink.c
net/netfilter/nf_tables_api.c
net/netfilter/nfnetlink.c
net/netfilter/nft_ct.c
net/netfilter/nft_hash.c
net/netfilter/nft_lookup.c
net/netfilter/nft_meta.c
net/netfilter/nft_rbtree.c
net/netlink/af_netlink.c
net/netlink/af_netlink.h
net/netlink/genetlink.c
net/openvswitch/actions.c
net/openvswitch/datapath.c
net/openvswitch/datapath.h
net/openvswitch/flow.c
net/openvswitch/flow.h
net/openvswitch/flow_netlink.c
net/openvswitch/flow_netlink.h
net/openvswitch/flow_table.c
net/openvswitch/flow_table.h
net/openvswitch/vport-gre.c
net/openvswitch/vport-internal_dev.c
net/openvswitch/vport-vxlan.c
net/openvswitch/vport.h
net/packet/diag.c
net/phonet/pn_netlink.c
net/rds/ib_send.c
net/rds/iw_send.c
net/rds/iw_sysctl.c
net/rds/sysctl.c
net/sched/act_api.c
net/sched/cls_api.c
net/sched/cls_basic.c
net/sched/cls_bpf.c
net/sched/cls_cgroup.c
net/sched/cls_flow.c
net/sched/cls_fw.c
net/sched/cls_route.c
net/sched/cls_rsvp.h
net/sched/cls_tcindex.c
net/sched/cls_u32.c
net/sched/sch_api.c
net/sched/sch_hhf.c
net/sctp/ipv6.c
net/sctp/output.c
net/sctp/proc.c
net/sctp/protocol.c
net/sctp/sm_sideeffect.c
net/sctp/socket.c
net/sctp/sysctl.c
net/tipc/Makefile
net/tipc/bcast.c
net/tipc/bcast.h
net/tipc/bearer.c
net/tipc/bearer.h
net/tipc/config.c
net/tipc/core.c
net/tipc/core.h
net/tipc/discover.c
net/tipc/discover.h
net/tipc/eth_media.c
net/tipc/handler.c [deleted file]
net/tipc/ib_media.c
net/tipc/link.c
net/tipc/link.h
net/tipc/msg.c
net/tipc/msg.h
net/tipc/name_distr.c
net/tipc/name_distr.h
net/tipc/name_table.c
net/tipc/net.c
net/tipc/net.h
net/tipc/netlink.c
net/tipc/node.c
net/tipc/node.h
net/tipc/node_subscr.c
net/tipc/node_subscr.h
net/tipc/port.c
net/tipc/port.h
net/tipc/socket.c
net/tipc/socket.h
net/unix/af_unix.c
net/vmw_vsock/af_vsock.c
net/wireless/Kconfig
net/wireless/chan.c
net/wireless/core.c
net/wireless/core.h
net/wireless/ethtool.c
net/wireless/ibss.c
net/wireless/mesh.c
net/wireless/mlme.c
net/wireless/nl80211.c
net/wireless/rdev-ops.h
net/wireless/reg.c
net/wireless/reg.h
net/wireless/scan.c
net/wireless/sme.c
net/wireless/trace.h
net/wireless/util.c
net/wireless/wext-compat.c
net/wireless/wext-compat.h
net/wireless/wext-sme.c
net/xfrm/xfrm_policy.c
net/xfrm/xfrm_proc.c
net/xfrm/xfrm_user.c
scripts/sortextable.c
security/selinux/hooks.c
security/selinux/include/classmap.h
sound/pci/hda/hda_controller.c
sound/pci/hda/hda_intel.c
sound/pci/hda/hda_priv.h
sound/pci/hda/patch_realtek.c
sound/soc/codecs/alc5623.c
sound/soc/codecs/cs42l52.c
sound/soc/codecs/cs42l73.c
sound/soc/codecs/tlv320aic3x.c
sound/soc/fsl/fsl_spdif.h
sound/soc/intel/sst-dsp-priv.h
sound/soc/intel/sst-haswell-ipc.c
sound/soc/jz4740/Makefile
sound/soc/sh/rcar/src.c
sound/soc/sh/rcar/ssi.c
sound/soc/soc-dapm.c
tools/lib/api/fs/debugfs.c
tools/lib/traceevent/event-parse.c
tools/lib/traceevent/event-parse.h
tools/net/bpf_dbg.c
tools/net/bpf_exp.l
tools/net/bpf_exp.y
tools/net/bpf_jit_disasm.c
tools/perf/Makefile.perf
tools/perf/arch/x86/tests/dwarf-unwind.c
tools/perf/arch/x86/tests/regs_load.S
tools/perf/builtin-kvm.c
tools/perf/builtin-record.c
tools/perf/config/Makefile
tools/perf/tests/make
tools/perf/util/data.c
tools/perf/util/machine.c
tools/perf/util/symbol-elf.c
tools/power/acpi/Makefile
tools/testing/selftests/net/Makefile
virt/kvm/arm/vgic.c
virt/kvm/assigned-dev.c
virt/kvm/async_pf.c

index 044b76436e8373ae601f9c60bd274754f3df6033..d9b9416c989fd81f0a9a338b59fc9093d96479a4 100644 (file)
 !Finclude/net/cfg80211.h wdev_priv
 !Finclude/net/cfg80211.h ieee80211_iface_limit
 !Finclude/net/cfg80211.h ieee80211_iface_combination
+!Finclude/net/cfg80211.h cfg80211_check_combinations
       </chapter>
       <chapter>
       <title>Actions and configuration</title>
index 06fc7602593a9d38a4cec97538948b03b5ce2676..37b2cafa4e52703b516d8c163653ae58106ad990 100644 (file)
@@ -19,6 +19,9 @@ to deliver its interrupts via SPIs.
 
 - clock-frequency : The frequency of the main counter, in Hz. Optional.
 
+- always-on : a boolean property. If present, the timer is powered through an
+  always-on power domain, therefore it never loses context.
+
 Example:
 
        timer {
index 7bcfbf59810e5a5c105740c7dca63844d7791ef2..a668f0e7d0018b76841127db20845a7fd45affd6 100644 (file)
@@ -24,6 +24,7 @@ Required properties:
   * "sata-phy" for the SATA 6.0Gbps PHY
 
 Optional properties:
+- dma-coherent         : Present if dma operations are coherent
 - status               : Shall be "ok" if enabled or "disabled" if disabled.
                          Default is "ok".
 
@@ -55,6 +56,7 @@ Example:
                              <0x0 0x1f22e000 0x0 0x1000>,
                              <0x0 0x1f227000 0x0 0x1000>;
                        interrupts = <0x0 0x87 0x4>;
+                       dma-coherent;
                        status = "ok";
                        clocks = <&sataclk 0>;
                        phys = <&phy2 0>;
@@ -69,6 +71,7 @@ Example:
                              <0x0 0x1f23e000 0x0 0x1000>,
                              <0x0 0x1f237000 0x0 0x1000>;
                        interrupts = <0x0 0x88 0x4>;
+                       dma-coherent;
                        status = "ok";
                        clocks = <&sataclk 0>;
                        phys = <&phy3 0>;
index 7fbb027218a126002312a829c6cd273ac715b030..a1d71eb43b209485ac7bec4832119f25fe68a49e 100644 (file)
@@ -4,11 +4,15 @@ Required properties:
 - compatible: Should be "snps,arc-emac"
 - reg: Address and length of the register set for the device
 - interrupts: Should contain the EMAC interrupts
-- clock-frequency: CPU frequency. It is needed to calculate and set polling
-period of EMAC.
 - max-speed: see ethernet.txt file in the same directory.
 - phy: see ethernet.txt file in the same directory.
 
+Clock handling:
+The clock frequency is needed to calculate and set polling period of EMAC.
+It must be provided by one of:
+- clock-frequency: CPU frequency.
+- clocks: reference to the clock supplying the EMAC.
+
 Child nodes of the driver are the individual PHY devices connected to the
 MDIO bus. They must have a "reg" property given the PHY address on the MDIO bus.
 
@@ -19,7 +23,11 @@ Examples:
                reg = <0xc0fc2000 0x3c>;
                interrupts = <6>;
                mac-address = [ 00 11 22 33 44 55 ];
+
                clock-frequency = <80000000>;
+               /* or */
+               clocks = <&emac_clock>;
+
                max-speed = <100>;
                phy = <&phy0>;
 
index f2febb94550e8868b32497001f3a6514feddddfb..451fef26b4dfaf05b6782099e54816d52a52baa8 100644 (file)
@@ -24,7 +24,7 @@ Optional properties:
 - fixed-link: When the GENET interface is connected to a MoCA hardware block or
   when operating in a RGMII to RGMII type of connection, or when the MDIO bus is
   voluntarily disabled, this property should be used to describe the "fixed link".
-  See Documentation/devicetree/bindings/net/fsl-tsec-phy.txt for information on
+  See Documentation/devicetree/bindings/net/fixed-link.txt for information on
   the property specifics
 
 Required child nodes:
diff --git a/Documentation/devicetree/bindings/net/broadcom-systemport.txt b/Documentation/devicetree/bindings/net/broadcom-systemport.txt
new file mode 100644 (file)
index 0000000..c183ea9
--- /dev/null
@@ -0,0 +1,29 @@
+* Broadcom BCM7xxx Ethernet Systemport Controller (SYSTEMPORT)
+
+Required properties:
+- compatible: should be one of "brcm,systemport-v1.00" or "brcm,systemport"
+- reg: address and length of the register set for the device.
+- interrupts: interrupts for the device, first cell must be for the rx
+  interrupts, and the second cell should be for the transmit queues
+- local-mac-address: Ethernet MAC address (48 bits) of this adapter
+- phy-mode: Should be a string describing the PHY interface to the
+  Ethernet switch/PHY, see Documentation/devicetree/bindings/net/ethernet.txt
+- fixed-link: see Documentation/devicetree/bindings/net/fixed-link.txt for
+  the property specific details
+
+Optional properties:
+- systemport,num-tier2-arb: number of tier 2 arbiters, an integer
+- systemport,num-tier1-arb: number of tier 1 arbiters, an integer
+- systemport,num-txq: number of HW transmit queues, an integer
+- systemport,num-rxq: number of HW receive queues, an integer
+
+Example:
+ethernet@f04a0000 {
+       compatible = "brcm,systemport-v1.00";
+       reg = <0xf04a0000 0x4650>;
+       local-mac-address = [ 00 11 22 33 44 55 ];
+       fixed-link = <0 1 1000 0 0>;
+       phy-mode = "gmii";
+       interrupts = <0x0 0x16 0x0>,
+               <0x0 0x17 0x0>;
+};
index 7ff57a119f81f449cfc2afb5ef988183071a8aa8..764c0c79b43d391435b847614c8d6c7aa4f06653 100644 (file)
@@ -2,7 +2,9 @@ TI CPSW Phy mode Selection Device Tree Bindings
 -----------------------------------------------
 
 Required properties:
-- compatible           : Should be "ti,am3352-cpsw-phy-sel"
+- compatible           : Should be "ti,am3352-cpsw-phy-sel" for am335x platform and
+                         "ti,dra7xx-cpsw-phy-sel" for dra7xx platform
+                         "ti,am43xx-cpsw-phy-sel" for am43xx platform
 - reg                  : physical base address and size of the cpsw
                          registers map
 - reg-names            : names of the register map given in "reg" node
diff --git a/Documentation/devicetree/bindings/net/fixed-link.txt b/Documentation/devicetree/bindings/net/fixed-link.txt
new file mode 100644 (file)
index 0000000..82bf7e0
--- /dev/null
@@ -0,0 +1,42 @@
+Fixed link Device Tree binding
+------------------------------
+
+Some Ethernet MACs have a "fixed link", and are not connected to a
+normal MDIO-managed PHY device. For those situations, a Device Tree
+binding allows to describe a "fixed link".
+
+Such a fixed link situation is described by creating a 'fixed-link'
+sub-node of the Ethernet MAC device node, with the following
+properties:
+
+* 'speed' (integer, mandatory), to indicate the link speed. Accepted
+  values are 10, 100 and 1000
+* 'full-duplex' (boolean, optional), to indicate that full duplex is
+  used. When absent, half duplex is assumed.
+* 'pause' (boolean, optional), to indicate that pause should be
+  enabled.
+* 'asym-pause' (boolean, optional), to indicate that asym_pause should
+  be enabled.
+
+Old, deprecated 'fixed-link' binding:
+
+* A 'fixed-link' property in the Ethernet MAC node, with 5 cells, of the
+  form <a b c d e> with the following accepted values:
+  - a: emulated PHY ID, choose any, but unique among all specified
+    fixed-links, from 0 to 31
+  - b: duplex configuration: 0 for half duplex, 1 for full duplex
+  - c: link speed in Mbits/sec, accepted values are: 10, 100 and 1000
+  - d: pause configuration: 0 for no pause, 1 for pause
+  - e: asymmetric pause configuration: 0 for no asymmetric pause, 1 for
+    asymmetric pause
+
+Example:
+
+ethernet@0 {
+       ...
+       fixed-link {
+             speed = <1000>;
+             full-duplex;
+       };
+       ...
+};
index 737cdef4f9036eb6069b9f536f137351dc42b137..be6ea8960f208c72b7d69e0286c56712661ccb2c 100644 (file)
@@ -42,10 +42,7 @@ Properties:
     interrupt.  For TSEC and eTSEC devices, the first interrupt is
     transmit, the second is receive, and the third is error.
   - phy-handle : See ethernet.txt file in the same directory.
-  - fixed-link : <a b c d e> where a is emulated phy id - choose any,
-    but unique to the all specified fixed-links, b is duplex - 0 half,
-    1 full, c is link speed - d#10/d#100/d#1000, d is pause - 0 no
-    pause, 1 pause, e is asym_pause - 0 no asym_pause, 1 asym_pause.
+  - fixed-link : See fixed-link.txt in the same directory.
   - phy-connection-type : See ethernet.txt file in the same directory.
     This property is only really needed if the connection is of type
     "rgmii-id", as all other connection types are detected by hardware.
diff --git a/Documentation/devicetree/bindings/net/ieee802154/at86rf230.txt b/Documentation/devicetree/bindings/net/ieee802154/at86rf230.txt
new file mode 100644 (file)
index 0000000..d3bbdde
--- /dev/null
@@ -0,0 +1,23 @@
+* AT86RF230 IEEE 802.15.4 *
+
+Required properties:
+  - compatible:                should be "atmel,at86rf230", "atmel,at86rf231",
+                       "atmel,at86rf233" or "atmel,at86rf212"
+  - spi-max-frequency: maximal bus speed, should be set to 7500000 depending
+                       on sync or async operation mode
+  - reg:               the chipselect index
+  - interrupts:                the interrupt generated by the device
+
+Optional properties:
+  - reset-gpio:                GPIO spec for the rstn pin
+  - sleep-gpio:                GPIO spec for the slp_tr pin
+
+Example:
+
+       at86rf231@0 {
+               compatible = "atmel,at86rf231";
+               spi-max-frequency = <7500000>;
+               reg = <0>;
+               interrupts = <19 1>;
+               interrupt-parent = <&gpio3>;
+       };
index c79bab025369af4bb6320ba0e90f7fb386942cc1..8dbcf8295c6c9ceaa4eb7518694acf3b09095dc1 100644 (file)
@@ -14,7 +14,7 @@ node.
 Example:
 
 aliases {
-       mdio-gpio0 = <&mdio0>;
+       mdio-gpio0 = &mdio0;
 };
 
 mdio0: mdio {
diff --git a/Documentation/devicetree/bindings/net/micrel-ksz9021.txt b/Documentation/devicetree/bindings/net/micrel-ksz9021.txt
deleted file mode 100644 (file)
index 997a63f..0000000
+++ /dev/null
@@ -1,49 +0,0 @@
-Micrel KSZ9021 Gigabit Ethernet PHY
-
-Some boards require special tuning values, particularly when it comes to
-clock delays.  You can specify clock delay values by adding
-micrel-specific properties to an Ethernet OF device node.
-
-All skew control options are specified in picoseconds.  The minimum
-value is 0, and the maximum value is 3000.
-
-Optional properties:
- - rxc-skew-ps : Skew control of RXC pad
- - rxdv-skew-ps : Skew control of RX CTL pad
- - txc-skew-ps : Skew control of TXC pad
- - txen-skew-ps : Skew control of TX_CTL pad
- - rxd0-skew-ps : Skew control of RX data 0 pad
- - rxd1-skew-ps : Skew control of RX data 1 pad
- - rxd2-skew-ps : Skew control of RX data 2 pad
- - rxd3-skew-ps : Skew control of RX data 3 pad
- - txd0-skew-ps : Skew control of TX data 0 pad
- - txd1-skew-ps : Skew control of TX data 1 pad
- - txd2-skew-ps : Skew control of TX data 2 pad
- - txd3-skew-ps : Skew control of TX data 3 pad
-
-Examples:
-
-       /* Attach to an Ethernet device with autodetected PHY */
-       &enet {
-               rxc-skew-ps = <3000>;
-               rxdv-skew-ps = <0>;
-               txc-skew-ps = <3000>;
-               txen-skew-ps = <0>;
-               status = "okay";
-       };
-
-       /* Attach to an explicitly-specified PHY */
-       mdio {
-               phy0: ethernet-phy@0 {
-                       rxc-skew-ps = <3000>;
-                       rxdv-skew-ps = <0>;
-                       txc-skew-ps = <3000>;
-                       txen-skew-ps = <0>;
-                       reg = <0>;
-               };
-       };
-       ethernet@70000 {
-               status = "okay";
-               phy = <&phy0>;
-               phy-mode = "rgmii-id";
-       };
diff --git a/Documentation/devicetree/bindings/net/micrel-ksz90x1.txt b/Documentation/devicetree/bindings/net/micrel-ksz90x1.txt
new file mode 100644 (file)
index 0000000..692076f
--- /dev/null
@@ -0,0 +1,83 @@
+Micrel KSZ9021/KSZ9031 Gigabit Ethernet PHY
+
+Some boards require special tuning values, particularly when it comes to
+clock delays. You can specify clock delay values by adding
+micrel-specific properties to an Ethernet OF device node.
+
+Note that these settings are applied after any phy-specific fixup from
+phy_fixup_list (see phy_init_hw() from drivers/net/phy/phy_device.c),
+and therefore may overwrite them.
+
+KSZ9021:
+
+  All skew control options are specified in picoseconds. The minimum
+  value is 0, the maximum value is 3000, and it is incremented by 200ps
+  steps.
+
+  Optional properties:
+
+    - rxc-skew-ps : Skew control of RXC pad
+    - rxdv-skew-ps : Skew control of RX CTL pad
+    - txc-skew-ps : Skew control of TXC pad
+    - txen-skew-ps : Skew control of TX CTL pad
+    - rxd0-skew-ps : Skew control of RX data 0 pad
+    - rxd1-skew-ps : Skew control of RX data 1 pad
+    - rxd2-skew-ps : Skew control of RX data 2 pad
+    - rxd3-skew-ps : Skew control of RX data 3 pad
+    - txd0-skew-ps : Skew control of TX data 0 pad
+    - txd1-skew-ps : Skew control of TX data 1 pad
+    - txd2-skew-ps : Skew control of TX data 2 pad
+    - txd3-skew-ps : Skew control of TX data 3 pad
+
+KSZ9031:
+
+  All skew control options are specified in picoseconds. The minimum
+  value is 0, and the maximum is property-dependent. The increment
+  step is 60ps.
+
+  Optional properties:
+
+    Maximum value of 1860:
+
+      - rxc-skew-ps : Skew control of RX clock pad
+      - txc-skew-ps : Skew control of TX clock pad
+
+    Maximum value of 900:
+
+      - rxdv-skew-ps : Skew control of RX CTL pad
+      - txen-skew-ps : Skew control of TX CTL pad
+      - rxd0-skew-ps : Skew control of RX data 0 pad
+      - rxd1-skew-ps : Skew control of RX data 1 pad
+      - rxd2-skew-ps : Skew control of RX data 2 pad
+      - rxd3-skew-ps : Skew control of RX data 3 pad
+      - txd0-skew-ps : Skew control of TX data 0 pad
+      - txd1-skew-ps : Skew control of TX data 1 pad
+      - txd2-skew-ps : Skew control of TX data 2 pad
+      - txd3-skew-ps : Skew control of TX data 3 pad
+
+Examples:
+
+       /* Attach to an Ethernet device with autodetected PHY */
+       &enet {
+               rxc-skew-ps = <3000>;
+               rxdv-skew-ps = <0>;
+               txc-skew-ps = <3000>;
+               txen-skew-ps = <0>;
+               status = "okay";
+       };
+
+       /* Attach to an explicitly-specified PHY */
+       mdio {
+               phy0: ethernet-phy@0 {
+                       rxc-skew-ps = <3000>;
+                       rxdv-skew-ps = <0>;
+                       txc-skew-ps = <3000>;
+                       txen-skew-ps = <0>;
+                       reg = <0>;
+               };
+       };
+       ethernet@70000 {
+               status = "okay";
+               phy = <&phy0>;
+               phy-mode = "rgmii-id";
+       };
index 636f0ac4e22388b4c8934681f7a7fc3712d30a0f..2a60cd3e8d5ddb7bdf3b2caad2bc414a3d8566e0 100644 (file)
@@ -23,5 +23,5 @@ gmac0: ethernet@ff700000 {
        interrupt-names = "macirq";
        mac-address = [00 00 00 00 00 00];/* Filled in by U-Boot */
        clocks = <&emac_0_clk>;
-       clocks-names = "stmmaceth";
+       clock-names = "stmmaceth";
 };
index 80c1fb8bfbb8bd778a6682fa75d863ce51d3c0e4..a2acd2b26baf78c8aafc3948d7dc7cb012c09db5 100644 (file)
@@ -33,7 +33,7 @@ Optional properties:
 - max-frame-size: See ethernet.txt file in the same directory
 - clocks: If present, the first clock should be the GMAC main clock,
   further clocks may be specified in derived bindings.
-- clocks-names: One name for each entry in the clocks property, the
+- clock-names: One name for each entry in the clocks property, the
   first one should be "stmmaceth".
 
 Examples:
diff --git a/Documentation/devicetree/bindings/net/via-rhine.txt b/Documentation/devicetree/bindings/net/via-rhine.txt
new file mode 100644 (file)
index 0000000..334eca2
--- /dev/null
@@ -0,0 +1,17 @@
+* VIA Rhine 10/100 Network Controller
+
+Required properties:
+- compatible : Should be "via,vt8500-rhine" for integrated
+       Rhine controllers found in VIA VT8500, WonderMedia WM8950
+       and similar. These are listed as 1106:3106 rev. 0x84 on the
+       virtual PCI bus under vendor-provided kernels
+- reg : Address and length of the io space
+- interrupts : Should contain the controller interrupt line
+
+Examples:
+
+ethernet@d8004000 {
+       compatible = "via,vt8500-rhine";
+       reg = <0xd8004000 0x100>;
+       interrupts = <10>;
+};
index 4bd5be0e5e7dd51eaf7cf23a92a2bf884dd264f1..26bcb18f4e609288d006eeae5bbf496730e2921f 100644 (file)
@@ -83,7 +83,7 @@ Example:
                reg             = <0xfe61f080 0x4>;
                reg-names       = "irqmux";
                interrupts      = <GIC_SPI 180 IRQ_TYPE_LEVEL_HIGH>;
-               interrupts-names = "irqmux";
+               interrupt-names = "irqmux";
                ranges          = <0 0xfe610000 0x5000>;
 
                PIO0: gpio@fe610000 {
@@ -165,7 +165,7 @@ sdhci0:sdhci@fe810000{
        interrupt-parent = <&PIO3>;
        #interrupt-cells = <2>;
        interrupts = <3 IRQ_TYPE_LEVEL_HIGH>; /* Interrupt line via PIO3-3 */
-       interrupts-names = "card-detect";
+       interrupt-names = "card-detect";
        pinctrl-names = "default";
        pinctrl-0       = <&pinctrl_mmc>;
 };
index 569b26c4a81ee25e1f141329f90903dcb28ab4e4..60ca07996458576e2fcc6f85a334e13fcec5a2c7 100644 (file)
@@ -47,7 +47,7 @@ mcasp0: mcasp0@1d00000 {
        reg = <0x100000 0x3000>;
        reg-names "mpu";
        interrupts = <82>, <83>;
-       interrupts-names = "tx", "rx";
+       interrupt-names = "tx", "rx";
        op-mode = <0>;          /* MCASP_IIS_MODE */
        tdm-slots = <2>;
        serial-dir = <
index 74c66dee3e146445b5b1593670dc52473f527165..eff12be5e789cf91bb4a5d4a21bab7f7d1b7d32c 100644 (file)
@@ -13,6 +13,9 @@ Required properties:
     "ti,tlv320aic3111" - TLV320AIC3111 (stereo speaker amp, MiniDSP)
 
 - reg - <int> -  I2C slave address
+- HPVDD-supply, SPRVDD-supply, SPLVDD-supply, AVDD-supply, IOVDD-supply,
+  DVDD-supply : power supplies for the device as covered in
+  Documentation/devicetree/bindings/regulator/regulator.txt
 
 
 Optional properties:
@@ -24,9 +27,6 @@ Optional properties:
         3 or MICBIAS_AVDD - MICBIAS output is connected to AVDD
        If this node is not mentioned or if the value is unknown, then
        micbias is set to 2.0V.
-- HPVDD-supply, SPRVDD-supply, SPLVDD-supply, AVDD-supply, IOVDD-supply,
-  DVDD-supply : power supplies for the device as covered in
-  Documentation/devicetree/bindings/regulator/regulator.txt
 
 CODEC output pins:
   * HPL
index 4f7897e99cba8a8fc7b5a33343825cfb94ce2d68..c74e04494ade32bd60b0cef1aafe725352afb0ca 100644 (file)
@@ -308,3 +308,8 @@ SLAVE DMA ENGINE
 
 SPI
   devm_spi_register_master()
+
+MDIO
+  devm_mdiobus_alloc()
+  devm_mdiobus_alloc_size()
+  devm_mdiobus_free()
index a383c00392d03f2e316f7fe7dd954c7d8b71f274..9c723ecd00251534a0b011c4e0ffbd053242454e 100644 (file)
@@ -585,13 +585,19 @@ mode
        balance-tlb or 5
 
                Adaptive transmit load balancing: channel bonding that
-               does not require any special switch support.  The
-               outgoing traffic is distributed according to the
-               current load (computed relative to the speed) on each
-               slave.  Incoming traffic is received by the current
-               slave.  If the receiving slave fails, another slave
-               takes over the MAC address of the failed receiving
-               slave.
+               does not require any special switch support.
+
+               In tlb_dynamic_lb=1 mode, the outgoing traffic is
+               distributed according to the current load (computed
+               relative to the speed) on each slave.
+
+               In tlb_dynamic_lb=0 mode, the load balancing based on
+               current load is disabled and the load is distributed
+               only using the hash distribution.
+
+               Incoming traffic is received by the current slave.
+               If the receiving slave fails, another slave takes over
+               the MAC address of the failed receiving slave.
 
                Prerequisite:
 
@@ -736,6 +742,28 @@ primary_reselect
 
        This option was added for bonding version 3.6.0.
 
+tlb_dynamic_lb
+
+       Specifies if dynamic shuffling of flows is enabled in tlb
+       mode. The value has no effect on any other modes.
+
+       The default behavior of tlb mode is to shuffle active flows across
+       slaves based on the load in that interval. This gives nice lb
+       characteristics but can cause packet reordering. If re-ordering is
+       a concern use this variable to disable flow shuffling and rely on
+       load balancing provided solely by the hash distribution.
+       xmit-hash-policy can be used to select the appropriate hashing for
+       the setup.
+
+       The sysfs entry can be used to change the setting per bond device
+       and the initial value is derived from the module parameter. The
+       sysfs entry is allowed to be changed only if the bond device is
+       down.
+
+       The default value is "1" that enables flow shuffling while value "0"
+       disables it. This option was added in bonding driver 3.7.1
+
+
 updelay
 
        Specifies the time, in milliseconds, to wait before enabling a
@@ -769,7 +797,7 @@ use_carrier
 xmit_hash_policy
 
        Selects the transmit hash policy to use for slave selection in
-       balance-xor and 802.3ad modes.  Possible values are:
+       balance-xor, 802.3ad, and tlb modes.  Possible values are:
 
        layer2
 
index 2fa44cbe81b73433f40db59ea50dc4ebbca917aa..cdd381c5311d1ad9de20df4db52b6e1bdfac21b9 100644 (file)
@@ -469,6 +469,41 @@ solution for a couple of reasons:
   having this 'send only' use-case we may remove the receive list in the
   Kernel to save a little (really a very little!) CPU usage.
 
+  4.1.1.1 CAN filter usage optimisation
+
+  The CAN filters are processed in per-device filter lists at CAN frame
+  reception time. To reduce the number of checks that need to be performed
+  while walking through the filter lists the CAN core provides an optimized
+  filter handling when the filter subscription focusses on a single CAN ID.
+
+  For the possible 2048 SFF CAN identifiers the identifier is used as an index
+  to access the corresponding subscription list without any further checks.
+  For the 2^29 possible EFF CAN identifiers a 10 bit XOR folding is used as
+  hash function to retrieve the EFF table index.
+
+  To benefit from the optimized filters for single CAN identifiers the
+  CAN_SFF_MASK or CAN_EFF_MASK have to be set into can_filter.mask together
+  with set CAN_EFF_FLAG and CAN_RTR_FLAG bits. A set CAN_EFF_FLAG bit in the
+  can_filter.mask makes clear that it matters whether a SFF or EFF CAN ID is
+  subscribed. E.g. in the example from above
+
+    rfilter[0].can_id   = 0x123;
+    rfilter[0].can_mask = CAN_SFF_MASK;
+
+  both SFF frames with CAN ID 0x123 and EFF frames with 0xXXXXX123 can pass.
+
+  To filter for only 0x123 (SFF) and 0x12345678 (EFF) CAN identifiers the
+  filter has to be defined in this way to benefit from the optimized filters:
+
+    struct can_filter rfilter[2];
+
+    rfilter[0].can_id   = 0x123;
+    rfilter[0].can_mask = (CAN_EFF_FLAG | CAN_RTR_FLAG | CAN_SFF_MASK);
+    rfilter[1].can_id   = 0x12345678 | CAN_EFF_FLAG;
+    rfilter[1].can_mask = (CAN_EFF_FLAG | CAN_RTR_FLAG | CAN_EFF_MASK);
+
+    setsockopt(s, SOL_CAN_RAW, CAN_RAW_FILTER, &rfilter, sizeof(rfilter));
+
   4.1.2 RAW socket option CAN_RAW_ERR_FILTER
 
   As described in chapter 3.4 the CAN interface driver can generate so
diff --git a/Documentation/networking/cdc_mbim.txt b/Documentation/networking/cdc_mbim.txt
new file mode 100644 (file)
index 0000000..a15ea60
--- /dev/null
@@ -0,0 +1,339 @@
+     cdc_mbim - Driver for CDC MBIM Mobile Broadband modems
+    ========================================================
+
+The cdc_mbim driver supports USB devices conforming to the "Universal
+Serial Bus Communications Class Subclass Specification for Mobile
+Broadband Interface Model" [1], which is a further development of
+"Universal Serial Bus Communications Class Subclass Specifications for
+Network Control Model Devices" [2] optimized for Mobile Broadband
+devices, aka "3G/LTE modems".
+
+
+Command Line Parameters
+=======================
+
+The cdc_mbim driver has no parameters of its own.  But the probing
+behaviour for NCM 1.0 backwards compatible MBIM functions (an
+"NCM/MBIM function" as defined in section 3.2 of [1]) is affected
+by a cdc_ncm driver parameter:
+
+prefer_mbim
+-----------
+Type:          Boolean
+Valid Range:   N/Y (0-1)
+Default Value: Y (MBIM is preferred)
+
+This parameter sets the system policy for NCM/MBIM functions.  Such
+functions will be handled by either the cdc_ncm driver or the cdc_mbim
+driver depending on the prefer_mbim setting.  Setting prefer_mbim=N
+makes the cdc_mbim driver ignore these functions and lets the cdc_ncm
+driver handle them instead.
+
+The parameter is writable, and can be changed at any time. A manual
+unbind/bind is required to make the change effective for NCM/MBIM
+functions bound to the "wrong" driver
+
+
+Basic usage
+===========
+
+MBIM functions are inactive when unmanaged. The cdc_mbim driver only
+provides an userspace interface to the MBIM control channel, and will
+not participate in the management of the function. This implies that a
+userspace MBIM management application always is required to enable a
+MBIM function.
+
+Such userspace applications include, but are not limited to:
+ - mbimcli (included with the libmbim [3] library), and
+ - ModemManager [4]
+
+Establishing a MBIM IP session requires at least these actions by the
+management application:
+ - open the control channel
+ - configure network connection settings
+ - connect to network
+ - configure IP interface
+
+Management application development
+----------------------------------
+The driver <-> userspace interfaces are described below.  The MBIM
+control channel protocol is described in [1].
+
+
+MBIM control channel userspace ABI
+==================================
+
+/dev/cdc-wdmX character device
+------------------------------
+The driver creates a two-way pipe to the MBIM function control channel
+using the cdc-wdm driver as a subdriver.  The userspace end of the
+control channel pipe is a /dev/cdc-wdmX character device.
+
+The cdc_mbim driver does not process or police messages on the control
+channel.  The channel is fully delegated to the userspace management
+application.  It is therefore up to this application to ensure that it
+complies with all the control channel requirements in [1].
+
+The cdc-wdmX device is created as a child of the MBIM control
+interface USB device.  The character device associated with a specific
+MBIM function can be looked up using sysfs.  For example:
+
+ bjorn@nemi:~$ ls /sys/bus/usb/drivers/cdc_mbim/2-4:2.12/usbmisc
+ cdc-wdm0
+
+ bjorn@nemi:~$ grep . /sys/bus/usb/drivers/cdc_mbim/2-4:2.12/usbmisc/cdc-wdm0/dev
+ 180:0
+
+
+USB configuration descriptors
+-----------------------------
+The wMaxControlMessage field of the CDC MBIM functional descriptor
+limits the maximum control message size. The management application is
+responsible for negotiating a control message size complying with the
+requirements in section 9.3.1 of [1], taking this descriptor field
+into consideration.
+
+The userspace application can access the CDC MBIM functional
+descriptor of a MBIM function using either of the two USB
+configuration descriptor kernel interfaces described in [6] or [7].
+
+See also the ioctl documentation below.
+
+
+Fragmentation
+-------------
+The userspace application is responsible for all control message
+fragmentation and defragmentation, as described in section 9.5 of [1].
+
+
+/dev/cdc-wdmX write()
+---------------------
+The MBIM control messages from the management application *must not*
+exceed the negotiated control message size.
+
+
+/dev/cdc-wdmX read()
+--------------------
+The management application *must* accept control messages of up to the
+negotiated control message size.
+
+
+/dev/cdc-wdmX ioctl()
+--------------------
+IOCTL_WDM_MAX_COMMAND: Get Maximum Command Size
+This ioctl returns the wMaxControlMessage field of the CDC MBIM
+functional descriptor for MBIM devices.  This is intended as a
+convenience, eliminating the need to parse the USB descriptors from
+userspace.
+
+       #include <stdio.h>
+       #include <fcntl.h>
+       #include <sys/ioctl.h>
+       #include <linux/types.h>
+       #include <linux/usb/cdc-wdm.h>
+       int main()
+       {
+               __u16 max;
+               int fd = open("/dev/cdc-wdm0", O_RDWR);
+               if (!ioctl(fd, IOCTL_WDM_MAX_COMMAND, &max))
+                       printf("wMaxControlMessage is %d\n", max);
+       }
+
+
+Custom device services
+----------------------
+The MBIM specification allows vendors to freely define additional
+services.  This is fully supported by the cdc_mbim driver.
+
+Support for new MBIM services, including vendor specified services, is
+implemented entirely in userspace, like the rest of the MBIM control
+protocol
+
+New services should be registered in the MBIM Registry [5].
+
+
+
+MBIM data channel userspace ABI
+===============================
+
+wwanY network device
+--------------------
+The cdc_mbim driver represents the MBIM data channel as a single
+network device of the "wwan" type. This network device is initially
+mapped to MBIM IP session 0.
+
+
+Multiplexed IP sessions (IPS)
+-----------------------------
+MBIM allows multiplexing up to 256 IP sessions over a single USB data
+channel.  The cdc_mbim driver models such IP sessions as 802.1q VLAN
+subdevices of the master wwanY device, mapping MBIM IP session Z to
+VLAN ID Z for all values of Z greater than 0.
+
+The device maximum Z is given in the MBIM_DEVICE_CAPS_INFO structure
+described in section 10.5.1 of [1].
+
+The userspace management application is responsible for adding new
+VLAN links prior to establishing MBIM IP sessions where the SessionId
+is greater than 0. These links can be added by using the normal VLAN
+kernel interfaces, either ioctl or netlink.
+
+For example, adding a link for a MBIM IP session with SessionId 3:
+
+  ip link add link wwan0 name wwan0.3 type vlan id 3
+
+The driver will automatically map the "wwan0.3" network device to MBIM
+IP session 3.
+
+
+Device Service Streams (DSS)
+----------------------------
+MBIM also allows up to 256 non-IP data streams to be multiplexed over
+the same shared USB data channel.  The cdc_mbim driver models these
+sessions as another set of 802.1q VLAN subdevices of the master wwanY
+device, mapping MBIM DSS session A to VLAN ID (256 + A) for all values
+of A.
+
+The device maximum A is given in the MBIM_DEVICE_SERVICES_INFO
+structure described in section 10.5.29 of [1].
+
+The DSS VLAN subdevices are used as a practical interface between the
+shared MBIM data channel and a MBIM DSS aware userspace application.
+It is not intended to be presented as-is to an end user. The
+assumption is that an userspace application initiating a DSS session
+also takes care of the necessary framing of the DSS data, presenting
+the stream to the end user in an appropriate way for the stream type.
+
+The network device ABI requires a dummy ethernet header for every DSS
+data frame being transported.  The contents of this header is
+arbitrary, with the following exceptions:
+ - TX frames using an IP protocol (0x0800 or 0x86dd) will be dropped
+ - RX frames will have the protocol field set to ETH_P_802_3 (but will
+   not be properly formatted 802.3 frames)
+ - RX frames will have the destination address set to the hardware
+   address of the master device
+
+The DSS supporting userspace management application is responsible for
+adding the dummy ethernet header on TX and stripping it on RX.
+
+This is a simple example using tools commonly available, exporting
+DssSessionId 5 as a pty character device pointed to by a /dev/nmea
+symlink:
+
+  ip link add link wwan0 name wwan0.dss5 type vlan id 261
+  ip link set dev wwan0.dss5 up
+  socat INTERFACE:wwan0.dss5,type=2 PTY:,echo=0,link=/dev/nmea
+
+This is only an example, most suitable for testing out a DSS
+service. Userspace applications supporting specific MBIM DSS services
+are expected to use the tools and programming interfaces required by
+that service.
+
+Note that adding VLAN links for DSS sessions is entirely optional.  A
+management application may instead choose to bind a packet socket
+directly to the master network device, using the received VLAN tags to
+map frames to the correct DSS session and adding 18 byte VLAN ethernet
+headers with the appropriate tag on TX.  In this case using a socket
+filter is recommended, matching only the DSS VLAN subset. This avoids
+unnecessary copying of unrelated IP session data to userspace.  For
+example:
+
+  static struct sock_filter dssfilter[] = {
+       /* use special negative offsets to get VLAN tag */
+       BPF_STMT(BPF_LD|BPF_B|BPF_ABS, SKF_AD_OFF + SKF_AD_VLAN_TAG_PRESENT),
+       BPF_JUMP(BPF_JMP|BPF_JEQ|BPF_K, 1, 0, 6), /* true */
+
+       /* verify DSS VLAN range */
+       BPF_STMT(BPF_LD|BPF_H|BPF_ABS, SKF_AD_OFF + SKF_AD_VLAN_TAG),
+       BPF_JUMP(BPF_JMP|BPF_JGE|BPF_K, 256, 0, 4),     /* 256 is first DSS VLAN */
+       BPF_JUMP(BPF_JMP|BPF_JGE|BPF_K, 512, 3, 0),     /* 511 is last DSS VLAN */
+
+       /* verify ethertype */
+        BPF_STMT(BPF_LD|BPF_H|BPF_ABS, 2 * ETH_ALEN),
+        BPF_JUMP(BPF_JMP|BPF_JEQ|BPF_K, ETH_P_802_3, 0, 1),
+
+        BPF_STMT(BPF_RET|BPF_K, (u_int)-1),    /* accept */
+        BPF_STMT(BPF_RET|BPF_K, 0),            /* ignore */
+  };
+
+
+
+Tagged IP session 0 VLAN
+------------------------
+As described above, MBIM IP session 0 is treated as special by the
+driver.  It is initially mapped to untagged frames on the wwanY
+network device.
+
+This mapping implies a few restrictions on multiplexed IPS and DSS
+sessions, which may not always be practical:
+ - no IPS or DSS session can use a frame size greater than the MTU on
+   IP session 0
+ - no IPS or DSS session can be in the up state unless the network
+   device representing IP session 0 also is up
+
+These problems can be avoided by optionally making the driver map IP
+session 0 to a VLAN subdevice, similar to all other IP sessions.  This
+behaviour is triggered by adding a VLAN link for the magic VLAN ID
+4094.  The driver will then immediately start mapping MBIM IP session
+0 to this VLAN, and will drop untagged frames on the master wwanY
+device.
+
+Tip: It might be less confusing to the end user to name this VLAN
+subdevice after the MBIM SessionID instead of the VLAN ID.  For
+example:
+
+  ip link add link wwan0 name wwan0.0 type vlan id 4094
+
+
+VLAN mapping
+------------
+
+Summarizing the cdc_mbim driver mapping described above, we have this
+relationship between VLAN tags on the wwanY network device and MBIM
+sessions on the shared USB data channel:
+
+  VLAN ID       MBIM type   MBIM SessionID           Notes
+  ---------------------------------------------------------
+  untagged      IPS         0                        a)
+  1 - 255       IPS         1 - 255 <VLANID>
+  256 - 511     DSS         0 - 255 <VLANID - 256>
+  512 - 4093                                         b)
+  4094          IPS         0                        c)
+
+    a) if no VLAN ID 4094 link exists, else dropped
+    b) unsupported VLAN range, unconditionally dropped
+    c) if a VLAN ID 4094 link exists, else dropped
+
+
+
+
+References
+==========
+
+[1] USB Implementers Forum, Inc. - "Universal Serial Bus
+      Communications Class Subclass Specification for Mobile Broadband
+      Interface Model", Revision 1.0 (Errata 1), May 1, 2013
+      - http://www.usb.org/developers/docs/devclass_docs/
+
+[2] USB Implementers Forum, Inc. - "Universal Serial Bus
+      Communications Class Subclass Specifications for Network Control
+      Model Devices", Revision 1.0 (Errata 1), November 24, 2010
+      - http://www.usb.org/developers/docs/devclass_docs/
+
+[3] libmbim - "a glib-based library for talking to WWAN modems and
+      devices which speak the Mobile Interface Broadband Model (MBIM)
+      protocol"
+      - http://www.freedesktop.org/wiki/Software/libmbim/
+
+[4] ModemManager - "a DBus-activated daemon which controls mobile
+      broadband (2G/3G/4G) devices and connections"
+      - http://www.freedesktop.org/wiki/Software/ModemManager/
+
+[5] "MBIM (Mobile Broadband Interface Model) Registry"
+       - http://compliance.usb.org/mbim/
+
+[6] "/proc/bus/usb filesystem output"
+       - Documentation/usb/proc_usb_info.txt
+
+[7] "/sys/bus/usb/devices/.../descriptors"
+       - Documentation/ABI/stable/sysfs-bus-usb
index 81f940f4e88480d48c35fd7707d679d646ef0af8..748fd385535d38ee905f48403b5244ccc41d037a 100644 (file)
@@ -281,6 +281,7 @@ Possible BPF extensions are shown in the following table:
   cpu                                   raw_smp_processor_id()
   vlan_tci                              vlan_tx_tag_get(skb)
   vlan_pr                               vlan_tx_tag_present(skb)
+  rand                                  prandom_u32()
 
 These extensions can also be prefixed with '#'.
 Examples for low-level BPF:
@@ -308,6 +309,18 @@ Examples for low-level BPF:
   ret #-1
   drop: ret #0
 
+** icmp random packet sampling, 1 in 4
+  ldh [12]
+  jne #0x800, drop
+  ldb [23]
+  jneq #1, drop
+  # get a random uint32 number
+  ld rand
+  mod #4
+  jneq #1, drop
+  ret #-1
+  drop: ret #0
+
 ** SECCOMP filter example:
 
   ld [4]                  /* offsetof(struct seccomp_data, arch) */
@@ -600,7 +613,7 @@ Some core changes of the new internal format:
 
   Therefore, BPF calling convention is defined as:
 
-    * R0       - return value from in-kernel function
+    * R0       - return value from in-kernel function, and exit value for BPF program
     * R1 - R5  - arguments from BPF program to in-kernel function
     * R6 - R9  - callee saved registers that in-kernel function will preserve
     * R10      - read-only frame pointer to access stack
@@ -646,9 +659,140 @@ Some core changes of the new internal format:
 - Introduces bpf_call insn and register passing convention for zero overhead
   calls from/to other kernel functions:
 
-  After a kernel function call, R1 - R5 are reset to unreadable and R0 has a
-  return type of the function. Since R6 - R9 are callee saved, their state is
-  preserved across the call.
+  Before an in-kernel function call, the internal BPF program needs to
+  place function arguments into R1 to R5 registers to satisfy calling
+  convention, then the interpreter will take them from registers and pass
+  to in-kernel function. If R1 - R5 registers are mapped to CPU registers
+  that are used for argument passing on given architecture, the JIT compiler
+  doesn't need to emit extra moves. Function arguments will be in the correct
+  registers and BPF_CALL instruction will be JITed as single 'call' HW
+  instruction. This calling convention was picked to cover common call
+  situations without performance penalty.
+
+  After an in-kernel function call, R1 - R5 are reset to unreadable and R0 has
+  a return value of the function. Since R6 - R9 are callee saved, their state
+  is preserved across the call.
+
+  For example, consider three C functions:
+
+  u64 f1() { return (*_f2)(1); }
+  u64 f2(u64 a) { return f3(a + 1, a); }
+  u64 f3(u64 a, u64 b) { return a - b; }
+
+  GCC can compile f1, f3 into x86_64:
+
+  f1:
+    movl $1, %edi
+    movq _f2(%rip), %rax
+    jmp  *%rax
+  f3:
+    movq %rdi, %rax
+    subq %rsi, %rax
+    ret
+
+  Function f2 in BPF may look like:
+
+  f2:
+    bpf_mov R2, R1
+    bpf_add R1, 1
+    bpf_call f3
+    bpf_exit
+
+  If f2 is JITed and the pointer stored to '_f2'. The calls f1 -> f2 -> f3 and
+  returns will be seamless. Without JIT, __sk_run_filter() interpreter needs to
+  be used to call into f2.
+
+  For practical reasons all BPF programs have only one argument 'ctx' which is
+  already placed into R1 (e.g. on __sk_run_filter() startup) and the programs
+  can call kernel functions with up to 5 arguments. Calls with 6 or more arguments
+  are currently not supported, but these restrictions can be lifted if necessary
+  in the future.
+
+  On 64-bit architectures all registers map to HW registers one to one. For
+  example, x86_64 JIT compiler can map them as ...
+
+    R0 - rax
+    R1 - rdi
+    R2 - rsi
+    R3 - rdx
+    R4 - rcx
+    R5 - r8
+    R6 - rbx
+    R7 - r13
+    R8 - r14
+    R9 - r15
+    R10 - rbp
+
+  ... since x86_64 ABI mandates rdi, rsi, rdx, rcx, r8, r9 for argument passing
+  and rbx, r12 - r15 are callee saved.
+
+  Then the following internal BPF pseudo-program:
+
+    bpf_mov R6, R1 /* save ctx */
+    bpf_mov R2, 2
+    bpf_mov R3, 3
+    bpf_mov R4, 4
+    bpf_mov R5, 5
+    bpf_call foo
+    bpf_mov R7, R0 /* save foo() return value */
+    bpf_mov R1, R6 /* restore ctx for next call */
+    bpf_mov R2, 6
+    bpf_mov R3, 7
+    bpf_mov R4, 8
+    bpf_mov R5, 9
+    bpf_call bar
+    bpf_add R0, R7
+    bpf_exit
+
+  After JIT to x86_64 may look like:
+
+    push %rbp
+    mov %rsp,%rbp
+    sub $0x228,%rsp
+    mov %rbx,-0x228(%rbp)
+    mov %r13,-0x220(%rbp)
+    mov %rdi,%rbx
+    mov $0x2,%esi
+    mov $0x3,%edx
+    mov $0x4,%ecx
+    mov $0x5,%r8d
+    callq foo
+    mov %rax,%r13
+    mov %rbx,%rdi
+    mov $0x2,%esi
+    mov $0x3,%edx
+    mov $0x4,%ecx
+    mov $0x5,%r8d
+    callq bar
+    add %r13,%rax
+    mov -0x228(%rbp),%rbx
+    mov -0x220(%rbp),%r13
+    leaveq
+    retq
+
+  Which is in this example equivalent in C to:
+
+    u64 bpf_filter(u64 ctx)
+    {
+        return foo(ctx, 2, 3, 4, 5) + bar(ctx, 6, 7, 8, 9);
+    }
+
+  In-kernel functions foo() and bar() with prototype: u64 (*)(u64 arg1, u64
+  arg2, u64 arg3, u64 arg4, u64 arg5); will receive arguments in proper
+  registers and place their return value into '%rax' which is R0 in BPF.
+  Prologue and epilogue are emitted by JIT and are implicit in the
+  interpreter. R0-R5 are scratch registers, so BPF program needs to preserve
+  them across the calls as defined by calling convention.
+
+  For example the following program is invalid:
+
+    bpf_mov R1, 1
+    bpf_call foo
+    bpf_mov R0, R1
+    bpf_exit
+
+  After the call the registers R1-R5 contain junk values and cannot be read.
+  In the future a BPF verifier can be used to validate internal BPF programs.
 
 Also in the new design, BPF is limited to 4096 insns, which means that any
 program will terminate quickly and will only call a fixed number of kernel
@@ -663,6 +807,25 @@ A program, that is translated internally consists of the following elements:
 
   op:16, jt:8, jf:8, k:32    ==>    op:8, a_reg:4, x_reg:4, off:16, imm:32
 
+So far 87 internal BPF instructions were implemented. 8-bit 'op' opcode field
+has room for new instructions. Some of them may use 16/24/32 byte encoding. New
+instructions must be multiple of 8 bytes to preserve backward compatibility.
+
+Internal BPF is a general purpose RISC instruction set. Not every register and
+every instruction are used during translation from original BPF to new format.
+For example, socket filters are not using 'exclusive add' instruction, but
+tracing filters may do to maintain counters of events, for example. Register R9
+is not used by socket filters either, but more complex filters may be running
+out of registers and would have to resort to spill/fill to stack.
+
+Internal BPF can be used as a generic assembler for last step performance
+optimizations, socket filters and seccomp are using it as assembler. Tracing
+filters may use it as assembler to generate code from kernel. In kernel usage
+may not be bounded by security considerations, since generated internal BPF code
+may be optimizing internal code path and not being exposed to the user space.
+Safety of internal BPF can come from a verifier (TBD). In such use cases as
+described, it may be used as safe instruction set.
+
 Just like the original BPF, the new format runs within a controlled environment,
 is deterministic and the kernel can easily prove that. The safety of the program
 can be determined in two steps: first step does depth-first-search to disallow
index ca6977f5b2ed066f49823c0d7c0129a9a16b0820..99ca40e8e810888d30bbb9726eb2de2e537c3e79 100644 (file)
@@ -429,7 +429,7 @@ RPS and RFS were introduced in kernel 2.6.35. XPS was incorporated into
 (therbert@google.com)
 
 Accelerated RFS was introduced in 2.6.35. Original patches were
-submitted by Ben Hutchings (bhutchings@solarflare.com)
+submitted by Ben Hutchings (bwh@kernel.org)
 
 Authors:
 Tom Herbert (therbert@google.com)
index e67ea244204163a5d0eb9e43239c5ccd4394bae1..f5de16eb19555345423c6421753435e98deb8480 100644 (file)
@@ -1967,6 +1967,12 @@ S:       Maintained
 F:     drivers/bcma/
 F:     include/linux/bcma/
 
+BROADCOM SYSTEMPORT ETHERNET DRIVER
+M:     Florian Fainelli <f.fainelli@gmail.com>
+L:     netdev@vger.kernel.org
+S:     Supported
+F:     drivers/net/ethernet/broadcom/bcmsysport.*
+
 BROCADE BFA FC SCSI DRIVER
 M:     Anil Gurumurthy <anil.gurumurthy@qlogic.com>
 M:     Sudarsana Kalluru <sudarsana.kalluru@qlogic.com>
@@ -2222,9 +2228,8 @@ F:        drivers/platform/chrome/
 CISCO VIC ETHERNET NIC DRIVER
 M:     Christian Benvenuti <benve@cisco.com>
 M:     Sujith Sankar <ssujith@cisco.com>
-M:     Govindarajulu Varadarajan <govindarajulu90@gmail.com>
+M:     Govindarajulu Varadarajan <_govind@gmx.com>
 M:     Neel Patel <neepatel@cisco.com>
-M:     Nishank Trivedi <nistrive@cisco.com>
 S:     Supported
 F:     drivers/net/ethernet/cisco/enic/
 
@@ -3485,6 +3490,12 @@ S:       Maintained
 F:     drivers/extcon/
 F:     Documentation/extcon/
 
+EXYNOS DP DRIVER
+M:     Jingoo Han <jg1.han@samsung.com>
+L:     dri-devel@lists.freedesktop.org
+S:     Maintained
+F:     drivers/gpu/drm/exynos/exynos_dp*
+
 EXYNOS MIPI DISPLAY DRIVERS
 M:     Inki Dae <inki.dae@samsung.com>
 M:     Donghwa Lee <dh09.lee@samsung.com>
@@ -3550,7 +3561,7 @@ F:        include/scsi/libfcoe.h
 F:     include/uapi/scsi/fc/
 
 FILE LOCKING (flock() and fcntl()/lockf())
-M:     Jeff Layton <jlayton@redhat.com>
+M:     Jeff Layton <jlayton@poochiereds.net>
 M:     J. Bruce Fields <bfields@fieldses.org>
 L:     linux-fsdevel@vger.kernel.org
 S:     Maintained
@@ -5108,14 +5119,19 @@ F:      drivers/s390/kvm/
 
 KERNEL VIRTUAL MACHINE (KVM) FOR ARM
 M:     Christoffer Dall <christoffer.dall@linaro.org>
+M:     Marc Zyngier <marc.zyngier@arm.com>
+L:     linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)
 L:     kvmarm@lists.cs.columbia.edu
 W:     http://systems.cs.columbia.edu/projects/kvm-arm
 S:     Supported
 F:     arch/arm/include/uapi/asm/kvm*
 F:     arch/arm/include/asm/kvm*
 F:     arch/arm/kvm/
+F:     virt/kvm/arm/
+F:     include/kvm/arm_*
 
 KERNEL VIRTUAL MACHINE FOR ARM64 (KVM/arm64)
+M:     Christoffer Dall <christoffer.dall@linaro.org>
 M:     Marc Zyngier <marc.zyngier@arm.com>
 L:     linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)
 L:     kvmarm@lists.cs.columbia.edu
@@ -7277,7 +7293,6 @@ F:        drivers/video/aty/aty128fb.c
 RALINK RT2X00 WIRELESS LAN DRIVER
 P:     rt2x00 project
 M:     Ivo van Doorn <IvDoorn@gmail.com>
-M:     Gertjan van Wingerde <gwingerde@gmail.com>
 M:     Helmut Schaa <helmut.schaa@googlemail.com>
 L:     linux-wireless@vger.kernel.org
 L:     users@rt2x00.serialmonkey.com (moderated for non-subscribers)
@@ -7293,7 +7308,7 @@ F:        Documentation/blockdev/ramdisk.txt
 F:     drivers/block/brd.c
 
 RANDOM NUMBER DRIVER
-M:     Theodore Ts'o" <tytso@mit.edu>
+M:     "Theodore Ts'o" <tytso@mit.edu>
 S:     Maintained
 F:     drivers/char/random.c
 
@@ -7674,7 +7689,6 @@ F:        drivers/clk/samsung/
 SAMSUNG SXGBE DRIVERS
 M:     Byungho An <bh74.an@samsung.com>
 M:     Girish K S <ks.giri@samsung.com>
-M:     Siva Reddy Kallam <siva.kallam@samsung.com>
 M:     Vipul Pandya <vipul.pandya@samsung.com>
 S:     Supported
 L:     netdev@vger.kernel.org
index 60ccbfe750a2641db2e43635885985b9e49318ed..28a7259e0f3b53ed0ee9242fcea45abc71c004f1 100644 (file)
--- a/Makefile
+++ b/Makefile
@@ -1,7 +1,7 @@
 VERSION = 3
 PATCHLEVEL = 15
 SUBLEVEL = 0
-EXTRAVERSION = -rc1
+EXTRAVERSION = -rc4
 NAME = Shuffling Zombie Juror
 
 # *DOCUMENTATION*
index 819dd5f7eb055ec87f082188e3d7ebb4a8b683aa..29b82adbf0b401685b269f81a22d29ffd4fd97a5 100644 (file)
@@ -614,11 +614,13 @@ resume_user_mode_begin:
 
 resume_kernel_mode:
 
-#ifdef CONFIG_PREEMPT
-
-       ; This is a must for preempt_schedule_irq()
+       ; Disable Interrupts from this point on
+       ; CONFIG_PREEMPT: This is a must for preempt_schedule_irq()
+       ; !CONFIG_PREEMPT: To ensure restore_regs is intr safe
        IRQ_DISABLE     r9
 
+#ifdef CONFIG_PREEMPT
+
        ; Can't preempt if preemption disabled
        GET_CURR_THR_INFO_FROM_SP   r10
        ld  r8, [r10, THREAD_INFO_PREEMPT_COUNT]
index ab438cb5af5570f5aae9b3215b9c73586ce80427..db3c5414223e7298346c6338665263d5f51c0e3c 100644 (file)
@@ -30,9 +30,9 @@ config ARM
        select HAVE_ARCH_SECCOMP_FILTER if (AEABI && !OABI_COMPAT)
        select HAVE_ARCH_TRACEHOOK
        select HAVE_BPF_JIT
+       select HAVE_CC_STACKPROTECTOR
        select HAVE_CONTEXT_TRACKING
        select HAVE_C_RECORDMCOUNT
-       select HAVE_CC_STACKPROTECTOR
        select HAVE_DEBUG_KMEMLEAK
        select HAVE_DMA_API_DEBUG
        select HAVE_DMA_ATTRS
@@ -311,6 +311,7 @@ config ARCH_MULTIPLATFORM
        select ARM_HAS_SG_CHAIN
        select ARM_PATCH_PHYS_VIRT
        select AUTO_ZRELADDR
+       select CLKSRC_OF
        select COMMON_CLK
        select GENERIC_CLOCKEVENTS
        select MULTI_IRQ_HANDLER
@@ -422,8 +423,8 @@ config ARCH_EFM32
        bool "Energy Micro efm32"
        depends on !MMU
        select ARCH_REQUIRE_GPIOLIB
-       select AUTO_ZRELADDR
        select ARM_NVIC
+       select AUTO_ZRELADDR
        select CLKSRC_OF
        select COMMON_CLK
        select CPU_V7M
@@ -511,8 +512,8 @@ config ARCH_IXP4XX
        bool "IXP4xx-based"
        depends on MMU
        select ARCH_HAS_DMA_SET_COHERENT_MASK
-       select ARCH_SUPPORTS_BIG_ENDIAN
        select ARCH_REQUIRE_GPIOLIB
+       select ARCH_SUPPORTS_BIG_ENDIAN
        select CLKSRC_MMIO
        select CPU_XSCALE
        select DMABOUNCE if PCI
@@ -1110,9 +1111,9 @@ config ARM_NR_BANKS
        default 8
 
 config IWMMXT
-       bool "Enable iWMMXt support" if !CPU_PJ4
-       depends on CPU_XSCALE || CPU_XSC3 || CPU_MOHAWK || CPU_PJ4
-       default y if PXA27x || PXA3xx || ARCH_MMP || CPU_PJ4
+       bool "Enable iWMMXt support"
+       depends on CPU_XSCALE || CPU_XSC3 || CPU_MOHAWK || CPU_PJ4 || CPU_PJ4B
+       default y if PXA27x || PXA3xx || ARCH_MMP || CPU_PJ4 || CPU_PJ4B
        help
          Enable support for iWMMXt context switching at run time if
          running on a CPU that supports it.
@@ -1575,8 +1576,8 @@ config BIG_LITTLE
 config BL_SWITCHER
        bool "big.LITTLE switcher support"
        depends on BIG_LITTLE && MCPM && HOTPLUG_CPU
-       select CPU_PM
        select ARM_CPU_SUSPEND
+       select CPU_PM
        help
          The big.LITTLE "switcher" provides the core functionality to
          transparently handle transition between a cluster of A15's
@@ -1920,9 +1921,9 @@ config XEN
        depends on CPU_V7 && !CPU_V6
        depends on !GENERIC_ATOMIC64
        depends on MMU
+       select ARCH_DMA_ADDR_T_64BIT
        select ARM_PSCI
        select SWIOTLB_XEN
-       select ARCH_DMA_ADDR_T_64BIT
        help
          Say Y if you want to run Linux in a Virtual Machine on Xen on ARM.
 
index 4a2fc0bf6fc913683c29bc2113b85b1f3db9bdd3..eab8ecbe69c1d271369700a9f7aac14c628685b0 100644 (file)
@@ -1030,9 +1030,9 @@ config DEBUG_UART_PHYS
        default 0x40100000 if DEBUG_PXA_UART1
        default 0x42000000 if ARCH_GEMINI
        default 0x7c0003f8 if FOOTBRIDGE
-       default 0x80230000 if DEBUG_PICOXCELL_UART
        default 0x80070000 if DEBUG_IMX23_UART
        default 0x80074000 if DEBUG_IMX28_UART
+       default 0x80230000 if DEBUG_PICOXCELL_UART
        default 0x808c0000 if ARCH_EP93XX
        default 0x90020000 if DEBUG_NSPIRE_CLASSIC_UART || DEBUG_NSPIRE_CX_UART
        default 0xb0090000 if DEBUG_VEXPRESS_UART0_CRX
@@ -1096,22 +1096,22 @@ config DEBUG_UART_VIRT
        default 0xfeb26000 if DEBUG_RK3X_UART1
        default 0xfeb30c00 if DEBUG_KEYSTONE_UART0
        default 0xfeb31000 if DEBUG_KEYSTONE_UART1
-       default 0xfec12000 if DEBUG_MVEBU_UART || DEBUG_MVEBU_UART_ALTERNATE
-       default 0xfed60000 if DEBUG_RK29_UART0
-       default 0xfed64000 if DEBUG_RK29_UART1 || DEBUG_RK3X_UART2
-       default 0xfed68000 if DEBUG_RK29_UART2 || DEBUG_RK3X_UART3
        default 0xfec02000 if DEBUG_SOCFPGA_UART
+       default 0xfec12000 if DEBUG_MVEBU_UART || DEBUG_MVEBU_UART_ALTERNATE
        default 0xfec20000 if DEBUG_DAVINCI_DMx_UART0
        default 0xfed0c000 if DEBUG_DAVINCI_DA8XX_UART1
        default 0xfed0d000 if DEBUG_DAVINCI_DA8XX_UART2
        default 0xfed12000 if ARCH_KIRKWOOD
+       default 0xfed60000 if DEBUG_RK29_UART0
+       default 0xfed64000 if DEBUG_RK29_UART1 || DEBUG_RK3X_UART2
+       default 0xfed68000 if DEBUG_RK29_UART2 || DEBUG_RK3X_UART3
        default 0xfedc0000 if ARCH_EP93XX
        default 0xfee003f8 if FOOTBRIDGE
        default 0xfee20000 if DEBUG_NSPIRE_CLASSIC_UART || DEBUG_NSPIRE_CX_UART
-       default 0xfef36000 if DEBUG_HIGHBANK_UART
        default 0xfee82340 if ARCH_IOP13XX
        default 0xfef00000 if ARCH_IXP4XX && !CPU_BIG_ENDIAN
        default 0xfef00003 if ARCH_IXP4XX && CPU_BIG_ENDIAN
+       default 0xfef36000 if DEBUG_HIGHBANK_UART
        default 0xfefff700 if ARCH_IOP33X
        default 0xff003000 if DEBUG_U300_UART
        default DEBUG_UART_PHYS if !MMU
index 35c146f31e46effa1b3b64cd69fd3a3b76ab38cb..377b7c3640337ed994107814836d16909ddbd447 100644 (file)
@@ -51,10 +51,9 @@ dtb-$(CONFIG_ARCH_AT91)      += sama5d36ek.dtb
 
 dtb-$(CONFIG_ARCH_ATLAS6) += atlas6-evb.dtb
 dtb-$(CONFIG_ARCH_BCM2835) += bcm2835-rpi-b.dtb
+dtb-$(CONFIG_ARCH_BCM_5301X) += bcm4708-netgear-r6250.dtb
 dtb-$(CONFIG_ARCH_BCM_MOBILE) += bcm28155-ap.dtb \
        bcm21664-garnet.dtb
-dtb-$(CONFIG_ARCH_BCM2835) += bcm2835-rpi-b.dtb
-dtb-$(CONFIG_ARCH_BCM_5301X) += bcm4708-netgear-r6250.dtb
 dtb-$(CONFIG_ARCH_BERLIN) += \
        berlin2-sony-nsz-gs7.dtb        \
        berlin2cd-google-chromecast.dtb
@@ -246,6 +245,7 @@ dtb-$(CONFIG_ARCH_OMAP2PLUS) += omap2420-h4.dtb \
        omap3-sbc-t3730.dtb \
        omap3-devkit8000.dtb \
        omap3-beagle-xm.dtb \
+       omap3-beagle-xm-ab.dtb \
        omap3-evm.dtb \
        omap3-evm-37xx.dtb \
        omap3-ldp.dtb \
@@ -294,13 +294,6 @@ dtb-$(CONFIG_ARCH_PRIMA2) += prima2-evb.dtb
 dtb-$(CONFIG_ARCH_QCOM) += qcom-msm8660-surf.dtb \
        qcom-msm8960-cdp.dtb \
        qcom-apq8074-dragonboard.dtb
-dtb-$(CONFIG_ARCH_U8500) += ste-snowball.dtb \
-       ste-hrefprev60-stuib.dtb \
-       ste-hrefprev60-tvk.dtb \
-       ste-hrefv60plus-stuib.dtb \
-       ste-hrefv60plus-tvk.dtb \
-       ste-ccu8540.dtb \
-       ste-ccu9540.dtb
 dtb-$(CONFIG_ARCH_S3C24XX) += s3c2416-smdk2416.dtb
 dtb-$(CONFIG_ARCH_S3C64XX) += s3c6410-mini6410.dtb \
        s3c6410-smdk6410.dtb
@@ -369,9 +362,16 @@ dtb-$(CONFIG_ARCH_TEGRA) += tegra20-harmony.dtb \
        tegra30-cardhu-a04.dtb \
        tegra114-dalmore.dtb \
        tegra124-venice2.dtb
+dtb-$(CONFIG_ARCH_U300) += ste-u300.dtb
+dtb-$(CONFIG_ARCH_U8500) += ste-snowball.dtb \
+       ste-hrefprev60-stuib.dtb \
+       ste-hrefprev60-tvk.dtb \
+       ste-hrefv60plus-stuib.dtb \
+       ste-hrefv60plus-tvk.dtb \
+       ste-ccu8540.dtb \
+       ste-ccu9540.dtb
 dtb-$(CONFIG_ARCH_VERSATILE) += versatile-ab.dtb \
        versatile-pb.dtb
-dtb-$(CONFIG_ARCH_U300) += ste-u300.dtb
 dtb-$(CONFIG_ARCH_VEXPRESS) += vexpress-v2p-ca5s.dtb \
        vexpress-v2p-ca9.dtb \
        vexpress-v2p-ca15-tc1.dtb \
index e3f27ec317182b887961c0a58ca49f66407935d8..2e7d932887b50185e95df69513986635e1403b39 100644 (file)
 &usb {
        status = "okay";
 
-       control@44e10000 {
+       control@44e10620 {
                status = "okay";
        };
 
                dr_mode = "host";
        };
 
-       dma-controller@07402000  {
+       dma-controller@47402000  {
                status = "okay";
        };
 };
index 28ae040e7c3d90b9094afc8c6543cf4749ffe5b0..6028217ace0fab2fb2476d36980521cab3db07e3 100644 (file)
 
        am335x_evm_audio_pins: am335x_evm_audio_pins {
                pinctrl-single,pins = <
-                       0x10c (PIN_INPUT_PULLDOWN | MUX_MODE4) /* mii1_rx_dv.mcasp1_aclkx */
-                       0x110 (PIN_INPUT_PULLDOWN | MUX_MODE4) /* mii1_txd3.mcasp1_fsx */
+                       0x10c (PIN_INPUT_PULLDOWN | MUX_MODE4) /* mii1_crs.mcasp1_aclkx */
+                       0x110 (PIN_INPUT_PULLDOWN | MUX_MODE4) /* mii1_rxerr.mcasp1_fsx */
                        0x108 (PIN_OUTPUT_PULLDOWN | MUX_MODE4) /* mii1_col.mcasp1_axr2 */
                        0x144 (PIN_INPUT_PULLDOWN | MUX_MODE4) /* rmii1_ref_clk.mcasp1_axr3 */
                >;
 &usb {
        status = "okay";
 
-       control@44e10000 {
+       control@44e10620 {
                status = "okay";
        };
 
                dr_mode = "host";
        };
 
-       dma-controller@07402000  {
+       dma-controller@47402000  {
                status = "okay";
        };
 };
index ec08f6f677c3eb4a2025a096b30e941efbd706a0..ab238850a7b21947de099158a6712dda1ce39fcb 100644 (file)
 &usb {
        status = "okay";
 
-       control@44e10000 {
+       control@44e10620 {
                status = "okay";
        };
 
                dr_mode = "host";
        };
 
-       dma-controller@07402000  {
+       dma-controller@47402000  {
                status = "okay";
        };
 };
index 7063311a58d96785dd297f89b982970a59a185bb..9f22c189f6361194a5d5705a7fae1ba19235043d 100644 (file)
                reg = <0 0 0>; /* CS0, offset 0 */
                nand-bus-width = <8>;
                ti,nand-ecc-opt = "bch8";
-               gpmc,device-nand = "true";
                gpmc,device-width = <1>;
                gpmc,sync-clk-ps = <0>;
                gpmc,cs-on-ns = <0>;
 &usb {
        status = "okay";
 
-       control@44e10000 {
+       control@44e10620 {
                status = "okay";
        };
 
                dr_mode = "host";
        };
 
-       dma-controller@07402000  {
+       dma-controller@47402000  {
                status = "okay";
        };
 };
index 9770e35f25361644ade6c8846ba5cb7571822f0c..baf56cc92040ed27ecb006ef32defaef6f7cf037 100644 (file)
@@ -72,7 +72,7 @@
        };
 
        /*
-        * The soc node represents the soc top level view. It is uses for IPs
+        * The soc node represents the soc top level view. It is used for IPs
         * that are not memory mapped in the MPU view or for the MPU itself.
         */
        soc {
@@ -94,8 +94,8 @@
 
        /*
         * XXX: Use a flat representation of the AM33XX interconnect.
-        * The real AM33XX interconnect network is quite complex.Since
-        * that will not bring real advantage to represent that in DT
+        * The real AM33XX interconnect network is quite complex. Since
+        * it will not bring real advantage to represent that in DT
         * for the moment, just use a fake OCP bus entry to represent
         * the whole bus hierarchy.
         */
                mac: ethernet@4a100000 {
                        compatible = "ti,cpsw";
                        ti,hwmods = "cpgmac0";
+                       clocks = <&cpsw_125mhz_gclk>, <&cpsw_cpts_rft_clk>;
+                       clock-names = "fck", "cpts";
                        cpdma_channels = <8>;
                        ale_entries = <1024>;
                        bd_ram_size = <0x2000>;
                              <0x46000000 0x400000>;
                        reg-names = "mpu", "dat";
                        interrupts = <80>, <81>;
-                       interrupts-names = "tx", "rx";
+                       interrupt-names = "tx", "rx";
                        status = "disabled";
                        dmas = <&edma 8>,
                                <&edma 9>;
                              <0x46400000 0x400000>;
                        reg-names = "mpu", "dat";
                        interrupts = <82>, <83>;
-                       interrupts-names = "tx", "rx";
+                       interrupt-names = "tx", "rx";
                        status = "disabled";
                        dmas = <&edma 10>,
                                <&edma 11>;
index 36d523a268314d3e1948dd894ae6b07141ac946e..03a2255051260ce50f8480f974316996dbae57f4 100644 (file)
                        #address-cells = <1>;
                        #size-cells = <1>;
                        ti,hwmods = "cpgmac0";
+                       clocks = <&cpsw_125mhz_gclk>, <&cpsw_cpts_rft_clk>;
+                       clock-names = "fck", "cpts";
                        status = "disabled";
                        cpdma_channels = <8>;
                        ale_entries = <1024>;
                              <0x46000000 0x400000>;
                        reg-names = "mpu", "dat";
                        interrupts = <80>, <81>;
-                       interrupts-names = "tx", "rx";
+                       interrupt-names = "tx", "rx";
                        status = "disabled";
                        dmas = <&edma 8>,
                               <&edma 9>;
                              <0x46400000 0x400000>;
                        reg-names = "mpu", "dat";
                        interrupts = <82>, <83>;
-                       interrupts-names = "tx", "rx";
+                       interrupt-names = "tx", "rx";
                        status = "disabled";
                        dmas = <&edma 10>,
                               <&edma 11>;
index bbb40f62037dbaf67ac8a585a17817997e8c1a17..bb77970c0b1223499137ef80079ad39446919f42 100644 (file)
                                #size-cells = <0>;
                                compatible = "marvell,orion-mdio";
                                reg = <0x72004 0x4>;
+                               clocks = <&gateclk 4>;
                        };
 
                        eth1: ethernet@74000 {
index a064f59da02d566b4bfb994376e53e6d51693797..ca8813bb99ba627d52e7c9d163005d39481f3e1a 100644 (file)
                                #size-cells = <0>;
                                compatible = "marvell,orion-mdio";
                                reg = <0x72004 0x4>;
+                               clocks = <&gateclk 4>;
                        };
 
                        coredivclk: clock@e4250 {
index c2242745b9b87a29afcfcc2fc77bf9178d5814f9..3bb8c008b14c822b26487d1c185ef18673aff1cf 100644 (file)
                        ethernet@30000 {
                                status = "okay";
                                phy-mode = "sgmii";
+                               fixed-link {
+                                       speed = <1000>;
+                                       full-duplex;
+                               };
                        };
 
                        pcie-controller {
index 1c0f8e1893aed11eb3dfb8ca43360cbed2e961f6..149b5509993588aa17971d6fbc7a56f314c658e5 100644 (file)
@@ -80,7 +80,7 @@
        };
 
        /*
-        * The soc node represents the soc top level view. It is uses for IPs
+        * The soc node represents the soc top level view. It is used for IPs
         * that are not memory mapped in the MPU view or for the MPU itself.
         */
        soc {
@@ -94,7 +94,7 @@
        /*
         * XXX: Use a flat representation of the SOC interconnect.
         * The real OMAP interconnect network is quite complex.
-        * Since that will not bring real advantage to represent that in DT for
+        * Since it will not bring real advantage to represent that in DT for
         * the moment, just use a fake OCP bus entry to represent the whole bus
         * hierarchy.
         */
index e96da9a898ad5cda61bc6b1b3195179c5c513eb6..cfb8fc753f5037087d7bbdfa6744d1a8fc283ec9 100644 (file)
                #clock-cells = <0>;
                compatible = "ti,mux-clock";
                clocks = <&abe_24m_fclk>, <&abe_sys_clk_div>, <&func_24m_clk>, <&atlclkin3_ck>, <&atl_clkin2_ck>, <&atl_clkin1_ck>, <&atl_clkin0_ck>, <&sys_clkin2>, <&ref_clkin0_ck>, <&ref_clkin1_ck>, <&ref_clkin2_ck>, <&ref_clkin3_ck>, <&mlb_clk>, <&mlbp_clk>;
-               ti,bit-shift = <28>;
+               ti,bit-shift = <24>;
                reg = <0x1860>;
        };
 
index 32f760e24898df9010b22b9efd21f400e5da5ab8..ea323f09dc78f83ecbf934ff894ef25f79f4a99e 100644 (file)
@@ -56,6 +56,7 @@
 
                osc {
                        compatible = "fsl,imx-osc", "fixed-clock";
+                       #clock-cells = <0>;
                        clock-frequency = <24000000>;
                };
        };
index 09f57b39e3ef37e7df1abd4321213bcca8626db5..73aae4f5e539dc3c6a9e68f642e0840e90e0f9e0 100644 (file)
@@ -29,6 +29,7 @@
 
                osc26m {
                        compatible = "fsl,imx-osc26m", "fixed-clock";
+                       #clock-cells = <0>;
                        clock-frequency = <0>;
                };
        };
index 6279e0b4f7683106439c062209e3c9101f0ea7ad..137e010eab35bebd9cad861574713ce9288adc68 100644 (file)
@@ -48,6 +48,7 @@
 
                osc26m {
                        compatible = "fsl,imx-osc26m", "fixed-clock";
+                       #clock-cells = <0>;
                        clock-frequency = <26000000>;
                };
        };
index 0c75fe3deb351d5210a513e442e20e74ba66f397..9c89d1ca97c2ce771a587c3066f968be22b52005 100644 (file)
 
                ckil {
                        compatible = "fsl,imx-ckil", "fixed-clock";
+                       #clock-cells = <0>;
                        clock-frequency = <32768>;
                };
 
                ckih1 {
                        compatible = "fsl,imx-ckih1", "fixed-clock";
+                       #clock-cells = <0>;
                        clock-frequency = <22579200>;
                };
 
                ckih2 {
                        compatible = "fsl,imx-ckih2", "fixed-clock";
+                       #clock-cells = <0>;
                        clock-frequency = <0>;
                };
 
                osc {
                        compatible = "fsl,imx-osc", "fixed-clock";
+                       #clock-cells = <0>;
                        clock-frequency = <24000000>;
                };
        };
index 5f8216d08f6b5f4ff98e13df047ab9b27ee83706..150bb4e2f744374fd712895ce30786dc9fdb7b25 100644 (file)
 
                ckil {
                        compatible = "fsl,imx-ckil", "fixed-clock";
+                       #clock-cells = <0>;
                        clock-frequency = <32768>;
                };
 
                ckih1 {
                        compatible = "fsl,imx-ckih1", "fixed-clock";
+                       #clock-cells = <0>;
                        clock-frequency = <0>;
                };
 
                ckih2 {
                        compatible = "fsl,imx-ckih2", "fixed-clock";
+                       #clock-cells = <0>;
                        clock-frequency = <0>;
                };
 
                osc {
                        compatible = "fsl,imx-osc", "fixed-clock";
+                       #clock-cells = <0>;
                        clock-frequency = <24000000>;
                };
        };
index f6d3ac3e55872657601c8a1785c8dd1e65707632..d5d146a8b149cd14601cef1b26857002d27eec9b 100644 (file)
@@ -17,7 +17,8 @@
        compatible = "denx,imx53-m53evk", "fsl,imx53";
 
        memory {
-               reg = <0x70000000 0x20000000>;
+               reg = <0x70000000 0x20000000>,
+                     <0xb0000000 0x20000000>;
        };
 
        soc {
                irq-trigger = <0x1>;
 
                stmpe_touchscreen {
-                       compatible = "stmpe,ts";
+                       compatible = "st,stmpe-ts";
                        reg = <0>;
-                       ts,sample-time = <4>;
-                       ts,mod-12b = <1>;
-                       ts,ref-sel = <0>;
-                       ts,adc-freq = <1>;
-                       ts,ave-ctrl = <3>;
-                       ts,touch-det-delay = <3>;
-                       ts,settling = <4>;
-                       ts,fraction-z = <7>;
-                       ts,i-drive = <1>;
+                       st,sample-time = <4>;
+                       st,mod-12b = <1>;
+                       st,ref-sel = <0>;
+                       st,adc-freq = <1>;
+                       st,ave-ctrl = <3>;
+                       st,touch-det-delay = <3>;
+                       st,settling = <4>;
+                       st,fraction-z = <7>;
+                       st,i-drive = <1>;
                };
        };
 
index 3f825a6813dae47a2412aebbf42788ca27cf8a26..ede04fa4161f63aeb925e35267608a12a0c5b0cc 100644 (file)
@@ -14,7 +14,8 @@
 
 / {
        memory {
-               reg = <0x70000000 0x40000000>;
+               reg = <0x70000000 0x20000000>,
+                     <0xb0000000 0x20000000>;
        };
 
        display0: display@di0 {
index 0217dde3b36b474d19d22a6b7192fb2c4aeefd61..3b73e81dc3f0df58507a7a6a3ae0556f9abee7dd 100644 (file)
        soc {
                display: display@di0 {
                        compatible = "fsl,imx-parallel-display";
-                       crtcs = <&ipu 0>;
                        interface-pix-fmt = "rgb24";
                        pinctrl-names = "default";
                        pinctrl-0 = <&pinctrl_rgb24_vga1>;
                        status = "okay";
 
+                       port {
+                               display0_in: endpoint {
+                                       remote-endpoint = <&ipu_di0_disp0>;
+                               };
+                       };
+
                        display-timings {
                                VGA {
                                        clock-frequency = <25200000>;
        };
 };
 
+&ipu_di0_disp0 {
+       remote-endpoint = <&display0_in>;
+};
+
 &kpp {
        pinctrl-names = "default";
        pinctrl-0 = <&pinctrl_kpp>;
index b57ab57740f686a96200b9a51c63c7d982d82644..9c2bff2252d0d078514348ab2bc92aad7ea5a6fb 100644 (file)
 
                ckil {
                        compatible = "fsl,imx-ckil", "fixed-clock";
+                       #clock-cells = <0>;
                        clock-frequency = <32768>;
                };
 
                ckih1 {
                        compatible = "fsl,imx-ckih1", "fixed-clock";
+                       #clock-cells = <0>;
                        clock-frequency = <22579200>;
                };
 
                ckih2 {
                        compatible = "fsl,imx-ckih2", "fixed-clock";
+                       #clock-cells = <0>;
                        clock-frequency = <0>;
                };
 
                osc {
                        compatible = "fsl,imx-osc", "fixed-clock";
+                       #clock-cells = <0>;
                        clock-frequency = <24000000>;
                };
        };
 
                                        port {
                                                lvds1_in: endpoint {
-                                                       remote-endpoint = <&ipu_di0_lvds0>;
+                                                       remote-endpoint = <&ipu_di1_lvds1>;
                                                };
                                        };
                                };
index a63bbb3d46bb43637ad2235bb3892c82508f1ac9..e4ae38fd02692a1d4a41876da4958519bb1465da 100644 (file)
        compatible = "dmo,imx6q-edmqmx6", "fsl,imx6q";
 
        aliases {
-               gpio7 = &stmpe_gpio;
+               gpio7 = &stmpe_gpio1;
+               gpio8 = &stmpe_gpio2;
+               stmpe-i2c0 = &stmpe1;
+               stmpe-i2c1 = &stmpe2;
        };
 
        memory {
                        regulator-always-on;
                };
 
-               reg_usb_otg_vbus: regulator@1 {
+               reg_usb_otg_switch: regulator@1 {
                        compatible = "regulator-fixed";
                        reg = <1>;
-                       regulator-name = "usb_otg_vbus";
+                       regulator-name = "usb_otg_switch";
                        regulator-min-microvolt = <5000000>;
                        regulator-max-microvolt = <5000000>;
                        gpio = <&gpio7 12 0>;
+                       regulator-boot-on;
+                       regulator-always-on;
                };
 
                reg_usb_host1: regulator@2 {
 
                led-blue {
                        label = "blue";
-                       gpios = <&stmpe_gpio 8 GPIO_ACTIVE_HIGH>;
+                       gpios = <&stmpe_gpio1 8 GPIO_ACTIVE_HIGH>;
                        linux,default-trigger = "heartbeat";
                };
 
                led-green {
                        label = "green";
-                       gpios = <&stmpe_gpio 9 GPIO_ACTIVE_HIGH>;
+                       gpios = <&stmpe_gpio1 9 GPIO_ACTIVE_HIGH>;
                };
 
                led-pink {
                        label = "pink";
-                       gpios = <&stmpe_gpio 10 GPIO_ACTIVE_HIGH>;
+                       gpios = <&stmpe_gpio1 10 GPIO_ACTIVE_HIGH>;
                };
 
                led-red {
                        label = "red";
-                       gpios = <&stmpe_gpio 11 GPIO_ACTIVE_HIGH>;
+                       gpios = <&stmpe_gpio1 11 GPIO_ACTIVE_HIGH>;
                };
        };
 };
        clock-frequency = <100000>;
        pinctrl-names = "default";
        pinctrl-0 = <&pinctrl_i2c2
-                    &pinctrl_stmpe>;
+                    &pinctrl_stmpe1
+                    &pinctrl_stmpe2>;
        status = "okay";
 
        pmic: pfuze100@08 {
                };
        };
 
-       stmpe: stmpe1601@40 {
+       stmpe1: stmpe1601@40 {
                compatible = "st,stmpe1601";
                reg = <0x40>;
                interrupts = <30 0>;
                interrupt-parent = <&gpio3>;
 
-               stmpe_gpio: stmpe_gpio {
+               stmpe_gpio1: stmpe_gpio {
+                       #gpio-cells = <2>;
+                       compatible = "st,stmpe-gpio";
+               };
+       };
+
+       stmpe2: stmpe1601@44 {
+               compatible = "st,stmpe1601";
+               reg = <0x44>;
+               interrupts = <2 0>;
+               interrupt-parent = <&gpio5>;
+
+               stmpe_gpio2: stmpe_gpio {
                        #gpio-cells = <2>;
                        compatible = "st,stmpe-gpio";
                };
                        >;
                };
 
-               pinctrl_stmpe: stmpegrp {
+               pinctrl_stmpe1: stmpe1grp {
                        fsl,pins = <MX6QDL_PAD_EIM_D30__GPIO3_IO30 0x80000000>;
                };
 
+               pinctrl_stmpe2: stmpe2grp {
+                       fsl,pins = <MX6QDL_PAD_EIM_A25__GPIO5_IO02 0x80000000>;
+               };
+
                pinctrl_uart1: uart1grp {
                        fsl,pins = <
                                MX6QDL_PAD_SD3_DAT7__UART1_TX_DATA      0x1b0b1
 
                pinctrl_usbotg: usbotggrp {
                        fsl,pins = <
-                               MX6QDL_PAD_GPIO_1__USB_OTG_ID           0x17059
+                               MX6QDL_PAD_ENET_RX_ER__USB_OTG_ID       0x17059
                        >;
                };
 
 &usbh1 {
        vbus-supply = <&reg_usb_host1>;
        disable-over-current;
+       dr_mode = "host";
        status = "okay";
 };
 
 &usbotg {
-       vbus-supply = <&reg_usb_otg_vbus>;
        pinctrl-names = "default";
        pinctrl-0 = <&pinctrl_usbotg>;
        disable-over-current;
index 902f9831048139318f80ffc661be963cdc799db2..e51bb3f0fd560ddec5372582549b8b3a8f922cb0 100644 (file)
 
 &ldb {
        status = "okay";
-       lvds-channel@0 {
-               crtcs = <&ipu1 0>, <&ipu1 1>, <&ipu2 0>, <&ipu2 1>;
-       };
 };
 
 &pcie {
index 8e99c9a9bc762ab79309641383323cf463bbbcc8..035d3a85c318b1f842d0d3664a2e0cd9d56ab798 100644 (file)
 
 &ldb {
        status = "okay";
-       lvds-channel@0 {
-               crtcs = <&ipu1 0>, <&ipu1 1>;
-       };
 };
 
 &pcie {
index a3cb2fff8f612183bdf6b311d707a1f5210c2bec..d16066608e21ae3716bc52a58597dc51f6ed9241 100644 (file)
                                /* GPIO16 -> AR8035 25MHz */
                                MX6QDL_PAD_GPIO_16__ENET_REF_CLK        0xc0000000
                                MX6QDL_PAD_RGMII_TXC__RGMII_TXC         0x80000000
-                               MX6QDL_PAD_RGMII_TD0__RGMII_TD0         0x1b0b0
-                               MX6QDL_PAD_RGMII_TD1__RGMII_TD1         0x1b0b0
-                               MX6QDL_PAD_RGMII_TD2__RGMII_TD2         0x1b0b0
-                               MX6QDL_PAD_RGMII_TD3__RGMII_TD3         0x1b0b0
-                               MX6QDL_PAD_RGMII_TX_CTL__RGMII_TX_CTL   0x1b0b0
+                               MX6QDL_PAD_RGMII_TD0__RGMII_TD0         0x1b030
+                               MX6QDL_PAD_RGMII_TD1__RGMII_TD1         0x1b030
+                               MX6QDL_PAD_RGMII_TD2__RGMII_TD2         0x1b030
+                               MX6QDL_PAD_RGMII_TD3__RGMII_TD3         0x1b030
+                               MX6QDL_PAD_RGMII_TX_CTL__RGMII_TX_CTL   0x1b030
                                /* AR8035 CLK_25M --> ENET_REF_CLK (V22) */
                                MX6QDL_PAD_ENET_REF_CLK__ENET_TX_CLK    0x0a0b1
                                /* AR8035 pin strapping: IO voltage: pull up */
-                               MX6QDL_PAD_RGMII_RXC__RGMII_RXC         0x1b0b0
+                               MX6QDL_PAD_RGMII_RXC__RGMII_RXC         0x1b030
                                /* AR8035 pin strapping: PHYADDR#0: pull down */
-                               MX6QDL_PAD_RGMII_RD0__RGMII_RD0         0x130b0
+                               MX6QDL_PAD_RGMII_RD0__RGMII_RD0         0x13030
                                /* AR8035 pin strapping: PHYADDR#1: pull down */
-                               MX6QDL_PAD_RGMII_RD1__RGMII_RD1         0x130b0
+                               MX6QDL_PAD_RGMII_RD1__RGMII_RD1         0x13030
                                /* AR8035 pin strapping: MODE#1: pull up */
-                               MX6QDL_PAD_RGMII_RD2__RGMII_RD2         0x1b0b0
+                               MX6QDL_PAD_RGMII_RD2__RGMII_RD2         0x1b030
                                /* AR8035 pin strapping: MODE#3: pull up */
-                               MX6QDL_PAD_RGMII_RD3__RGMII_RD3         0x1b0b0
+                               MX6QDL_PAD_RGMII_RD3__RGMII_RD3         0x1b030
                                /* AR8035 pin strapping: MODE#0: pull down */
-                               MX6QDL_PAD_RGMII_RX_CTL__RGMII_RX_CTL   0x130b0
+                               MX6QDL_PAD_RGMII_RX_CTL__RGMII_RX_CTL   0x13030
 
                                /*
                                 * As the RMII pins are also connected to RGMII
index 55cb926fa3f7ed4fbe043587e74d57af344c7bad..eca0971d4db1ae7885985e8ce966d6475b3f3991 100644 (file)
@@ -10,6 +10,8 @@
  * http://www.gnu.org/copyleft/gpl.html
  */
 
+#include <dt-bindings/interrupt-controller/arm-gic.h>
+
 #include "skeleton.dtsi"
 
 / {
@@ -46,8 +48,6 @@
        intc: interrupt-controller@00a01000 {
                compatible = "arm,cortex-a9-gic";
                #interrupt-cells = <3>;
-               #address-cells = <1>;
-               #size-cells = <1>;
                interrupt-controller;
                reg = <0x00a01000 0x1000>,
                      <0x00a00100 0x100>;
 
                ckil {
                        compatible = "fsl,imx-ckil", "fixed-clock";
+                       #clock-cells = <0>;
                        clock-frequency = <32768>;
                };
 
                ckih1 {
                        compatible = "fsl,imx-ckih1", "fixed-clock";
+                       #clock-cells = <0>;
                        clock-frequency = <0>;
                };
 
                osc {
                        compatible = "fsl,imx-osc", "fixed-clock";
+                       #clock-cells = <0>;
                        clock-frequency = <24000000>;
                };
        };
                                  0x82000000 0 0x01000000 0x01000000 0 0x00f00000>; /* non-prefetchable memory */
                        num-lanes = <1>;
                        interrupts = <0 123 IRQ_TYPE_LEVEL_HIGH>;
+                       #interrupt-cells = <1>;
+                       interrupt-map-mask = <0 0 0 0x7>;
+                       interrupt-map = <0 0 0 1 &intc GIC_SPI 123 IRQ_TYPE_LEVEL_HIGH>,
+                                       <0 0 0 2 &intc GIC_SPI 122 IRQ_TYPE_LEVEL_HIGH>,
+                                       <0 0 0 3 &intc GIC_SPI 121 IRQ_TYPE_LEVEL_HIGH>,
+                                       <0 0 0 4 &intc GIC_SPI 120 IRQ_TYPE_LEVEL_HIGH>;
                        clocks = <&clks 189>, <&clks 187>, <&clks 206>, <&clks 144>;
                        clock-names = "pcie_ref_125m", "sata_ref_100m", "lvds_gate", "pcie_axi";
                        status = "disabled";
index 864d8dfb51ca525ebc04c0769073fdeda273835b..a8d9a93fab85fd5031eb8c164fc676823392e0b2 100644 (file)
                                MX6SL_PAD_ECSPI1_MISO__ECSPI1_MISO      0x100b1
                                MX6SL_PAD_ECSPI1_MOSI__ECSPI1_MOSI      0x100b1
                                MX6SL_PAD_ECSPI1_SCLK__ECSPI1_SCLK      0x100b1
+                               MX6SL_PAD_ECSPI1_SS0__GPIO4_IO11        0x80000000
                        >;
                };
 
index 3cb4941afeef9ab6cb121b4271d78c522ff771f8..d26b099260a35da021d85c9f20907294fcd27625 100644 (file)
@@ -68,8 +68,6 @@
        intc: interrupt-controller@00a01000 {
                compatible = "arm,cortex-a9-gic";
                #interrupt-cells = <3>;
-               #address-cells = <1>;
-               #size-cells = <1>;
                interrupt-controller;
                reg = <0x00a01000 0x1000>,
                      <0x00a00100 0x100>;
 
                ckil {
                        compatible = "fixed-clock";
+                       #clock-cells = <0>;
                        clock-frequency = <32768>;
                };
 
                osc {
                        compatible = "fixed-clock";
+                       #clock-cells = <0>;
                        clock-frequency = <24000000>;
                };
        };
index 40791053106bc9889b996544ef3de7e89440d626..6becedebaa4e946e9fc080096a771053d06854df 100644 (file)
@@ -75,7 +75,7 @@
                        m25p16@0 {
                                #address-cells = <1>;
                                #size-cells = <1>;
-                               compatible = "m25p16";
+                               compatible = "st,m25p16";
                                reg = <0>;
                                spi-max-frequency = <40000000>;
                                mode = <0>;
index 0e06fd3cee4dc40113e94420f767d4ffba7301c7..3b62aeeaa3a2fe1ff5b01eab47e7c25718b45647 100644 (file)
@@ -46,7 +46,7 @@
                        flash@0 {
                                #address-cells = <1>;
                                #size-cells = <1>;
-                               compatible = "mx25l4005a";
+                               compatible = "mxicy,mx25l4005a";
                                reg = <0>;
                                spi-max-frequency = <20000000>;
                                mode = <0>;
index ef3463e0ae1939be8bcffa3a7d0200c387a8dc5f..28b3ee369778f945379096fea755796ef6156cef 100644 (file)
@@ -43,7 +43,7 @@
                        m25p40@0 {
                                #address-cells = <1>;
                                #size-cells = <1>;
-                               compatible = "mx25l1606e";
+                               compatible = "mxicy,mx25l1606e";
                                reg = <0>;
                                spi-max-frequency = <50000000>;
                                mode = <0>;
index c9e82eff9bf2e73af3204e9be1f79dff2bc62410..6761ffa2c4ab7eb9e130dbef6a00e4a2faea1714 100644 (file)
@@ -48,7 +48,7 @@
                        status = "okay";
 
                        eeprom@50 {
-                               compatible = "at,24c04";
+                               compatible = "atmel,24c04";
                                pagesize = <16>;
                                reg = <0x50>;
                        };
index 2cb0dc529165dcd88cbbba8ae7df7da2647a041f..32c6fb4a11624c05756e4e2fe24b05c62c799667 100644 (file)
@@ -56,7 +56,7 @@
                        flash@0 {
                                #address-cells = <1>;
                                #size-cells = <1>;
-                               compatible = "mx25l12805d";
+                               compatible = "mxicy,mx25l12805d";
                                reg = <0>;
                                spi-max-frequency = <50000000>;
                                mode = <0>;
index 743152f31a815b0e970b7dab80e4ed8af94adb87..e6e5ec4fe6b9e005b9c37ced8fda18f31bb5fbe0 100644 (file)
@@ -32,7 +32,7 @@
                        flash@0 {
                                #address-cells = <1>;
                                #size-cells = <1>;
-                               compatible = "mx25l4005a";
+                               compatible = "mxicy,mx25l4005a";
                                reg = <0>;
                                spi-max-frequency = <20000000>;
                                mode = <0>;
@@ -50,7 +50,7 @@
                        status = "okay";
 
                        eeprom@50 {
-                               compatible = "at,24c04";
+                               compatible = "atmel,24c04";
                                pagesize = <16>;
                                reg = <0x50>;
                        };
index 03fa24cf334468ff66095883b64ac5b76ae6bc42..0a07af9d8e58d0c06938fe0284332f87a650503b 100644 (file)
                        status = "okay";
 
                        adt7476: adt7476a@2e {
-                               compatible = "adt7476";
+                               compatible = "adi,adt7476";
                                reg = <0x2e>;
                        };
                };
index a5e77945286776940aa38d363ad08bc081e85e3b..27ca6a79c48a473f15d082287e19a605ef98b651 100644 (file)
@@ -94,7 +94,7 @@
                        status = "okay";
 
                        lm85: lm85@2e {
-                               compatible = "lm85";
+                               compatible = "national,lm85";
                                reg = <0x2e>;
                        };
                };
index b88da9392c32dd93780ead4c80dd8b5e4bf6934a..0650beafc1de0ac4a7e60fbc759ed7726982c22a 100644 (file)
@@ -40,7 +40,7 @@
                        pinctrl-names = "default";
 
                        s35390a: s35390a@30 {
-                               compatible = "s35390a";
+                               compatible = "sii,s35390a";
                                reg = <0x30>;
                        };
                };
index b2f7cae0683959f7c75ba82b0a195762d7fd32f4..38520a2875146d565c8016f69cc4f324098ff1ee 100644 (file)
@@ -52,7 +52,7 @@
                        pinctrl-names = "default";
 
                        s24c02: s24c02@50 {
-                               compatible = "24c02";
+                               compatible = "atmel,24c02";
                                reg = <0x50>;
                        };
                };
diff --git a/arch/arm/boot/dts/omap3-beagle-xm-ab.dts b/arch/arm/boot/dts/omap3-beagle-xm-ab.dts
new file mode 100644 (file)
index 0000000..7ac3bcf
--- /dev/null
@@ -0,0 +1,16 @@
+/*
+ * Copyright (C) 2011 Texas Instruments Incorporated - http://www.ti.com/
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include "omap3-beagle-xm.dts"
+
+/ {
+       /* HS USB Port 2 Power enable was inverted with the xM C */
+       hsusb2_power: hsusb2_power_reg {
+               enable-active-high;
+       };
+};
index bf5a515a324752d8fd36e96fb288b07d3b2f31e1..da402f0fdab4861bf9b775b247cabaac192f8777 100644 (file)
                reg = <0 0 0>; /* CS0, offset 0 */
                nand-bus-width = <16>;
 
-               gpmc,device-nand;
                gpmc,sync-clk-ps = <0>;
                gpmc,cs-on-ns = <0>;
                gpmc,cs-rd-off-ns = <44>;
index 6369d9f43ca23e701197945ccf41483f3d7106b5..cc1dce6978f59323ba7579d66f6b29dd2a9b6d8b 100644 (file)
                /* no elm on omap3 */
 
                gpmc,mux-add-data = <0>;
-               gpmc,device-nand;
                gpmc,device-width = <2>;
                gpmc,wait-pin = <0>;
                gpmc,wait-monitoring-ns = <0>;
index 5e5790f631eba4b5b1085ac64a1c4d5a0f6062c9..acb9019dc437b66321ec7995dc16456678a2e42c 100644 (file)
@@ -74,7 +74,7 @@
        /*
         * XXX: Use a flat representation of the OMAP3 interconnect.
         * The real OMAP interconnect network is quite complex.
-        * Since that will not bring real advantage to represent that in DT for
+        * Since it will not bring real advantage to represent that in DT for
         * the moment, just use a fake OCP bus entry to represent the whole bus
         * hierarchy.
         */
index 27fcac874742894879bb978b8157f213a1c49eb2..649b5cd38b403102e9a6d5ddebf918a3e39ab73e 100644 (file)
@@ -72,7 +72,7 @@
        };
 
        /*
-        * The soc node represents the soc top level view. It is uses for IPs
+        * The soc node represents the soc top level view. It is used for IPs
         * that are not memory mapped in the MPU view or for the MPU itself.
         */
        soc {
@@ -96,7 +96,7 @@
        /*
         * XXX: Use a flat representation of the OMAP4 interconnect.
         * The real OMAP interconnect network is quite complex.
-        * Since that will not bring real advantage to represent that in DT for
+        * Since it will not bring real advantage to represent that in DT for
         * the moment, just use a fake OCP bus entry to represent the whole bus
         * hierarchy.
         */
index 6f3de22fb2660f20a61f2ec4438be6708ab40a6d..f8c9855ce587c15790f79a2f04e0ff02933f463f 100644 (file)
@@ -93,7 +93,7 @@
        };
 
        /*
-        * The soc node represents the soc top level view. It is uses for IPs
+        * The soc node represents the soc top level view. It is used for IPs
         * that are not memory mapped in the MPU view or for the MPU itself.
         */
        soc {
        /*
         * XXX: Use a flat representation of the OMAP3 interconnect.
         * The real OMAP interconnect network is quite complex.
-        * Since that will not bring real advantage to represent that in DT for
+        * Since it will not bring real advantage to represent that in DT for
         * the moment, just use a fake OCP bus entry to represent the whole bus
         * hierarchy.
         */
                                      <0x4a084c00 0x40>;
                                reg-names = "phy_rx", "phy_tx", "pll_ctrl";
                                ctrl-module = <&omap_control_usb3phy>;
+                               clocks = <&usb_phy_cm_clk32k>,
+                                        <&sys_clkin>,
+                                        <&usb_otg_ss_refclk960m>;
+                               clock-names =   "wkupclk",
+                                               "sysclk",
+                                               "refclk";
                                #phy-cells = <0>;
                        };
                };
index 8280884bfa596b95d447b11505b775783dc260a2..2551e9438d358a55e8e4edb7c494ea231ee46810 100644 (file)
@@ -28,7 +28,6 @@
        gic: interrupt-controller@c2800000 {
                compatible = "arm,cortex-a9-gic";
                #interrupt-cells = <3>;
-               #address-cells = <1>;
                interrupt-controller;
                reg = <0xc2800000 0x1000>,
                      <0xc2000000 0x1000>;
index 6e99eb2df076d7f1c7cdfdac46733c968256c0dd..d01048ab3e777534e224eb9a9395ba0a83cd56b7 100644 (file)
        };
 
        sdhi0_pins: sd0 {
-               renesas,gpios = "sdhi0_data4", "sdhi0_ctrl";
+               renesas,groups = "sdhi0_data4", "sdhi0_ctrl";
                renesas,function = "sdhi0";
        };
 
        sdhi2_pins: sd2 {
-               renesas,gpios = "sdhi2_data4", "sdhi2_ctrl";
+               renesas,groups = "sdhi2_data4", "sdhi2_ctrl";
                renesas,function = "sdhi2";
        };
 
index bdd73e6657b27a76ee2d2f7c37abdced267b96ae..de1b6977c69a4b009d3e658b8d9790b373278d49 100644 (file)
        };
 
        sdhi0_pins: sd0 {
-               renesas,gpios = "sdhi0_data4", "sdhi0_ctrl";
+               renesas,groups = "sdhi0_data4", "sdhi0_ctrl";
                renesas,function = "sdhi0";
        };
 
        sdhi1_pins: sd1 {
-               renesas,gpios = "sdhi1_data4", "sdhi1_ctrl";
+               renesas,groups = "sdhi1_data4", "sdhi1_ctrl";
                renesas,function = "sdhi1";
        };
 
        sdhi2_pins: sd2 {
-               renesas,gpios = "sdhi2_data4", "sdhi2_ctrl";
+               renesas,groups = "sdhi2_data4", "sdhi2_ctrl";
                renesas,function = "sdhi2";
        };
 
index bb36596ea20538ac9ac4d74f868c72fed40b5613..ed9a70af3e3f88ff59a266165b2655754e50dbda 100644 (file)
 
                        uart0 {
                                uart0_xfer: uart0-xfer {
-                                       rockchip,pins = <RK_GPIO1 0 RK_FUNC_1 &pcfg_pull_none>,
+                                       rockchip,pins = <RK_GPIO1 0 RK_FUNC_1 &pcfg_pull_up>,
                                                        <RK_GPIO1 1 RK_FUNC_1 &pcfg_pull_none>;
                                };
 
 
                        uart1 {
                                uart1_xfer: uart1-xfer {
-                                       rockchip,pins = <RK_GPIO1 4 RK_FUNC_1 &pcfg_pull_none>,
+                                       rockchip,pins = <RK_GPIO1 4 RK_FUNC_1 &pcfg_pull_up>,
                                                        <RK_GPIO1 5 RK_FUNC_1 &pcfg_pull_none>;
                                };
 
 
                        uart2 {
                                uart2_xfer: uart2-xfer {
-                                       rockchip,pins = <RK_GPIO1 8 RK_FUNC_1 &pcfg_pull_none>,
+                                       rockchip,pins = <RK_GPIO1 8 RK_FUNC_1 &pcfg_pull_up>,
                                                        <RK_GPIO1 9 RK_FUNC_1 &pcfg_pull_none>;
                                };
                                /* no rts / cts for uart2 */
 
                        uart3 {
                                uart3_xfer: uart3-xfer {
-                                       rockchip,pins = <RK_GPIO1 10 RK_FUNC_1 &pcfg_pull_none>,
+                                       rockchip,pins = <RK_GPIO1 10 RK_FUNC_1 &pcfg_pull_up>,
                                                        <RK_GPIO1 11 RK_FUNC_1 &pcfg_pull_none>;
                                };
 
index b7bd3b9a67533933623ba5b5c118ae46c035f019..5ecf552e1c009faf2317793e2b52ab6f24fc5655 100644 (file)
@@ -34,7 +34,6 @@
        gic: interrupt-controller@f0001000 {
                compatible = "arm,cortex-a9-gic";
                #interrupt-cells = <3>;
-               #address-cells = <1>;
                interrupt-controller;
                reg = <0xf0001000 0x1000>,
                      <0xf0000100 0x100>;
index f09fb10a3791a7e4fc238f4e47548fc7f86da705..81df870e5ee6791530b3902aab65ea6576bf47f8 100644 (file)
@@ -49,7 +49,7 @@
                        reg             = <0xfe61f080 0x4>;
                        reg-names       = "irqmux";
                        interrupts      = <GIC_SPI 180 IRQ_TYPE_LEVEL_HIGH>;
-                       interrupts-names = "irqmux";
+                       interrupt-names = "irqmux";
                        ranges          = <0 0xfe610000 0x5000>;
 
                        PIO0: gpio@fe610000 {
                        reg             = <0xfee0f080 0x4>;
                        reg-names       = "irqmux";
                        interrupts      = <GIC_SPI 181 IRQ_TYPE_LEVEL_HIGH>;
-                       interrupts-names = "irqmux";
+                       interrupt-names = "irqmux";
                        ranges          = <0 0xfee00000 0x8000>;
 
                        PIO5: gpio@fee00000 {
                        reg             = <0xfe82f080 0x4>;
                        reg-names       = "irqmux";
                        interrupts      = <GIC_SPI 182 IRQ_TYPE_LEVEL_HIGH>;
-                       interrupts-names = "irqmux";
+                       interrupt-names = "irqmux";
                        ranges          = <0 0xfe820000 0x8000>;
 
                        PIO13: gpio@fe820000 {
                        reg             = <0xfd6bf080 0x4>;
                        reg-names       = "irqmux";
                        interrupts      = <GIC_SPI 113 IRQ_TYPE_LEVEL_HIGH>;
-                       interrupts-names = "irqmux";
+                       interrupt-names = "irqmux";
                        ranges          = <0 0xfd6b0000 0x3000>;
 
                        PIO100: gpio@fd6b0000 {
                        reg             = <0xfd33f080 0x4>;
                        reg-names       = "irqmux";
                        interrupts      = <GIC_SPI 114 IRQ_TYPE_LEVEL_HIGH>;
-                       interrupts-names = "irqmux";
+                       interrupt-names = "irqmux";
                        ranges          = <0 0xfd330000 0x5000>;
 
                        PIO103: gpio@fd330000 {
index aeea304086eb3b57c5682539643f0d6ca4540c0d..250d5ecc951ea0e3e5c7f071fb4e38b6312840d7 100644 (file)
@@ -53,7 +53,7 @@
                        reg             = <0xfe61f080 0x4>;
                        reg-names       = "irqmux";
                        interrupts      = <GIC_SPI 182 IRQ_TYPE_LEVEL_HIGH>;
-                       interrupts-names = "irqmux";
+                       interrupt-names = "irqmux";
                        ranges          = <0 0xfe610000 0x6000>;
 
                        PIO0: gpio@fe610000 {
                        reg             = <0xfee0f080 0x4>;
                        reg-names       = "irqmux";
                        interrupts      = <GIC_SPI 183 IRQ_TYPE_LEVEL_HIGH>;
-                       interrupts-names = "irqmux";
+                       interrupt-names = "irqmux";
                        ranges          = <0 0xfee00000 0x10000>;
 
                        PIO5: gpio@fee00000 {
                        reg             = <0xfe82f080 0x4>;
                        reg-names       = "irqmux";
                        interrupts      = <GIC_SPI 184 IRQ_TYPE_LEVEL_HIGH>;
-                       interrupts-names = "irqmux";
+                       interrupt-names = "irqmux";
                        ranges          = <0 0xfe820000 0x6000>;
 
                        PIO13: gpio@fe820000 {
                        reg             = <0xfd6bf080 0x4>;
                        reg-names       = "irqmux";
                        interrupts      = <GIC_SPI 113 IRQ_TYPE_LEVEL_HIGH>;
-                       interrupts-names = "irqmux";
+                       interrupt-names = "irqmux";
                        ranges          = <0 0xfd6b0000 0x3000>;
 
                        PIO100: gpio@fd6b0000 {
                        reg             = <0xfd33f080 0x4>;
                        reg-names       = "irqmux";
                        interrupts      = <GIC_SPI 114 IRQ_TYPE_LEVEL_HIGH>;
-                       interrupts-names = "irqmux";
+                       interrupt-names = "irqmux";
                        ranges                  = <0 0xfd330000 0x5000>;
 
                        PIO103: gpio@fd330000 {
index cf45a1a394835ecc64309e00f58f5b33a5855260..6d540a02514886d37e095a7e47909457db9ef0e5 100644 (file)
                status = "disabled";
        };
 
-       serial@0,70006400 {
-               compatible = "nvidia,tegra124-uart", "nvidia,tegra20-uart";
-               reg = <0x0 0x70006400 0x0 0x40>;
-               reg-shift = <2>;
-               interrupts = <GIC_SPI 91 IRQ_TYPE_LEVEL_HIGH>;
-               clocks = <&tegra_car TEGRA124_CLK_UARTE>;
-               resets = <&tegra_car 66>;
-               reset-names = "serial";
-               dmas = <&apbdma 20>, <&apbdma 20>;
-               dma-names = "rx", "tx";
-               status = "disabled";
-       };
-
        pwm@0,7000a000 {
                compatible = "nvidia,tegra124-pwm", "nvidia,tegra20-pwm";
                reg = <0x0 0x7000a000 0x0 0x100>;
index 7dd1d6ede5258e9b45c384bd2384428b0e8a7818..ded361075aab7a1504eadc460defc992d5402161 100644 (file)
        clocks {
                audio_ext {
                        compatible = "fixed-clock";
+                       #clock-cells = <0>;
                        clock-frequency = <24576000>;
                };
 
                enet_ext {
                        compatible = "fixed-clock";
+                       #clock-cells = <0>;
                        clock-frequency = <50000000>;
                };
        };
index 8048733676693de212e505aeae15ae5ad020c01c..b8ce0aa7b1579064980edee427fa0cd41f7c710d 100644 (file)
 
                sxosc {
                        compatible = "fixed-clock";
+                       #clock-cells = <0>;
                        clock-frequency = <32768>;
                };
 
                fxosc {
                        compatible = "fixed-clock";
+                       #clock-cells = <0>;
                        clock-frequency = <24000000>;
                };
        };
@@ -72,8 +74,6 @@
                        intc: interrupt-controller@40002000 {
                                compatible = "arm,cortex-a9-gic";
                                #interrupt-cells = <3>;
-                               #address-cells = <1>;
-                               #size-cells = <1>;
                                interrupt-controller;
                                reg = <0x40003000 0x1000>,
                                      <0x40002100 0x100>;
index 51d0e912c8f585b1acb51edc9f47fc4270a1a988..1929ad390d88feb0ec42bce29ad1b174823ed55b 100644 (file)
                        reg = <0xd8100000 0x10000>;
                        interrupts = <48>;
                };
+
+               ethernet@d8004000 {
+                       compatible = "via,vt8500-rhine";
+                       reg = <0xd8004000 0x100>;
+                       interrupts = <10>;
+               };
        };
 };
index 7525982262ac9896285031462e45b23b4020d9c7..b1c59a766a13381a693d897eb3349db4ac9d3c16 100644 (file)
                        reg = <0xd8100000 0x10000>;
                        interrupts = <48>;
                };
+
+               ethernet@d8004000 {
+                       compatible = "via,vt8500-rhine";
+                       reg = <0xd8004000 0x100>;
+                       interrupts = <10>;
+               };
        };
 };
index d98386dd2882500bd71ecf726d8ac9bb26b777a7..8fbccfbe75f33df7be79ea7be37c15b9f5bf2535 100644 (file)
                        bus-width = <4>;
                        sdon-inverted;
                };
+
+               ethernet@d8004000 {
+                       compatible = "via,vt8500-rhine";
+                       reg = <0xd8004000 0x100>;
+                       interrupts = <10>;
+                };
        };
 };
index 511180769af5c0fb31acd6beb58cb1031a417a1d..c1176abc34d92d0491eeeadf74a926ff7fc360ed 100644 (file)
@@ -24,6 +24,7 @@
                        device_type = "cpu";
                        reg = <0>;
                        clocks = <&clkc 3>;
+                       clock-latency = <1000>;
                        operating-points = <
                                /* kHz    uV */
                                666667  1000000
                interrupt-parent = <&intc>;
                ranges;
 
+               i2c0: zynq-i2c@e0004000 {
+                       compatible = "cdns,i2c-r1p10";
+                       status = "disabled";
+                       clocks = <&clkc 38>;
+                       interrupt-parent = <&intc>;
+                       interrupts = <0 25 4>;
+                       reg = <0xe0004000 0x1000>;
+                       #address-cells = <1>;
+                       #size-cells = <0>;
+               };
+
+               i2c1: zynq-i2c@e0005000 {
+                       compatible = "cdns,i2c-r1p10";
+                       status = "disabled";
+                       clocks = <&clkc 39>;
+                       interrupt-parent = <&intc>;
+                       interrupts = <0 48 4>;
+                       reg = <0xe0005000 0x1000>;
+                       #address-cells = <1>;
+                       #size-cells = <0>;
+               };
+
                intc: interrupt-controller@f8f01000 {
                        compatible = "arm,cortex-a9-gic";
                        #interrupt-cells = <3>;
index c913f77a21ebfbb84c4533eaa94d5bcd905e3edb..5e09cee33d4230773f8687fd3e187f22e49d0b77 100644 (file)
        phy-mode = "rgmii";
 };
 
+&i2c0 {
+       status = "okay";
+       clock-frequency = <400000>;
+
+       i2cswitch@74 {
+               compatible = "nxp,pca9548";
+               #address-cells = <1>;
+               #size-cells = <0>;
+               reg = <0x74>;
+
+               i2c@0 {
+                       #address-cells = <1>;
+                       #size-cells = <0>;
+                       reg = <0>;
+                       si570: clock-generator@5d {
+                               #clock-cells = <0>;
+                               compatible = "silabs,si570";
+                               temperature-stability = <50>;
+                               reg = <0x5d>;
+                               factory-fout = <156250000>;
+                               clock-frequency = <148500000>;
+                       };
+               };
+
+               i2c@2 {
+                       #address-cells = <1>;
+                       #size-cells = <0>;
+                       reg = <2>;
+                       eeprom@54 {
+                               compatible = "at,24c08";
+                               reg = <0x54>;
+                       };
+               };
+
+               i2c@3 {
+                       #address-cells = <1>;
+                       #size-cells = <0>;
+                       reg = <3>;
+                       gpio@21 {
+                               compatible = "ti,tca6416";
+                               reg = <0x21>;
+                               gpio-controller;
+                               #gpio-cells = <2>;
+                       };
+               };
+
+               i2c@4 {
+                       #address-cells = <1>;
+                       #size-cells = <0>;
+                       reg = <4>;
+                       rtc@51 {
+                               compatible = "nxp,pcf8563";
+                               reg = <0x51>;
+                       };
+               };
+
+               i2c@7 {
+                       #address-cells = <1>;
+                       #size-cells = <0>;
+                       reg = <7>;
+                       hwmon@52 {
+                               compatible = "ti,ucd9248";
+                               reg = <52>;
+                       };
+                       hwmon@53 {
+                               compatible = "ti,ucd9248";
+                               reg = <53>;
+                       };
+                       hwmon@54 {
+                               compatible = "ti,ucd9248";
+                               reg = <54>;
+                       };
+               };
+       };
+};
+
 &sdhci0 {
        status = "okay";
 };
index 88f62c50382ec59e5f35cc7a1319cb41c1585f13..4cc9913078cd6427ab69d206a21ba8d44c5c431d 100644 (file)
        phy-mode = "rgmii";
 };
 
+&i2c0 {
+       status = "okay";
+       clock-frequency = <400000>;
+
+       i2cswitch@74 {
+               compatible = "nxp,pca9548";
+               #address-cells = <1>;
+               #size-cells = <0>;
+               reg = <0x74>;
+
+               i2c@0 {
+                       #address-cells = <1>;
+                       #size-cells = <0>;
+                       reg = <0>;
+                       si570: clock-generator@5d {
+                               #clock-cells = <0>;
+                               compatible = "silabs,si570";
+                               temperature-stability = <50>;
+                               reg = <0x5d>;
+                               factory-fout = <156250000>;
+                               clock-frequency = <148500000>;
+                       };
+               };
+
+               i2c@2 {
+                       #address-cells = <1>;
+                       #size-cells = <0>;
+                       reg = <2>;
+                       eeprom@54 {
+                               compatible = "at,24c08";
+                               reg = <0x54>;
+                       };
+               };
+
+               i2c@3 {
+                       #address-cells = <1>;
+                       #size-cells = <0>;
+                       reg = <3>;
+                       gpio@21 {
+                               compatible = "ti,tca6416";
+                               reg = <0x21>;
+                               gpio-controller;
+                               #gpio-cells = <2>;
+                       };
+               };
+
+               i2c@4 {
+                       #address-cells = <1>;
+                       #size-cells = <0>;
+                       reg = <4>;
+                       rtc@51 {
+                               compatible = "nxp,pcf8563";
+                               reg = <0x51>;
+                       };
+               };
+
+               i2c@7 {
+                       #address-cells = <1>;
+                       #size-cells = <0>;
+                       reg = <7>;
+                       ucd90120@65 {
+                               compatible = "ti,ucd90120";
+                               reg = <0x65>;
+                       };
+               };
+       };
+};
+
 &sdhci0 {
        status = "okay";
 };
index 5774b6ea7ad55ab0e6c56a5239c475f00a9feb32..f01c0ee0c87ebd94debc320f5714bf247dfe5ab7 100644 (file)
@@ -797,10 +797,8 @@ static int __init bL_switcher_init(void)
 {
        int ret;
 
-       if (MAX_NR_CLUSTERS != 2) {
-               pr_err("%s: only dual cluster systems are supported\n", __func__);
-               return -EINVAL;
-       }
+       if (!mcpm_is_available())
+               return -ENODEV;
 
        cpu_notifier(bL_switcher_hotplug_callback, 0);
 
index 1e361abc29eb0e106492223348de051d1e4e9f3d..86fd60fefbc935a788b52bdf8701efebbe5d5f8d 100644 (file)
@@ -48,6 +48,11 @@ int __init mcpm_platform_register(const struct mcpm_platform_ops *ops)
        return 0;
 }
 
+bool mcpm_is_available(void)
+{
+       return (platform_ops) ? true : false;
+}
+
 int mcpm_cpu_power_up(unsigned int cpu, unsigned int cluster)
 {
        if (!platform_ops)
index a9667957b7578bac7b218ea9163e657ecb385f88..a4e8d017f25bae466d8b0a918c753196870e9d78 100644 (file)
@@ -226,7 +226,7 @@ CONFIG_USB_DWC3=m
 CONFIG_USB_TEST=y
 CONFIG_NOP_USB_XCEIV=y
 CONFIG_OMAP_USB2=y
-CONFIG_OMAP_USB3=y
+CONFIG_TI_PIPE3=y
 CONFIG_AM335X_PHY_USB=y
 CONFIG_USB_GADGET=y
 CONFIG_USB_GADGET_DEBUG=y
index fd81a1b99cce5a0971315fb17bfb7f1c87f67773..aaa95ab606a83a647bfbc0f88de6a892f67123d6 100644 (file)
@@ -11,6 +11,7 @@ CONFIG_MODULES=y
 CONFIG_MODULE_UNLOAD=y
 # CONFIG_LBDAF is not set
 # CONFIG_BLK_DEV_BSG is not set
+CONFIG_PARTITION_ADVANCED=y
 # CONFIG_IOSCHED_CFQ is not set
 # CONFIG_ARCH_MULTI_V7 is not set
 CONFIG_ARCH_U300=y
@@ -21,7 +22,6 @@ CONFIG_ZBOOT_ROM_TEXT=0x0
 CONFIG_ZBOOT_ROM_BSS=0x0
 CONFIG_CMDLINE="root=/dev/ram0 rw rootfstype=rootfs console=ttyAMA0,115200n8 lpj=515072"
 CONFIG_CPU_IDLE=y
-CONFIG_FPE_NWFPE=y
 # CONFIG_SUSPEND is not set
 CONFIG_UEVENT_HELPER_PATH="/sbin/hotplug"
 # CONFIG_PREVENT_FIRMWARE_BUILD is not set
@@ -64,8 +64,8 @@ CONFIG_TMPFS=y
 CONFIG_NLS_CODEPAGE_437=y
 CONFIG_NLS_ISO8859_1=y
 CONFIG_PRINTK_TIME=y
+CONFIG_DEBUG_INFO=y
 CONFIG_DEBUG_FS=y
 # CONFIG_SCHED_DEBUG is not set
 CONFIG_TIMER_STATS=y
 # CONFIG_DEBUG_PREEMPT is not set
-CONFIG_DEBUG_INFO=y
index 65f77885c1674df038d6d92d17603e76e0058a46..d219d6a43238c6e354639500af4e5a9d56ff8714 100644 (file)
@@ -1,16 +1,16 @@
 # CONFIG_SWAP is not set
 CONFIG_SYSVIPC=y
-CONFIG_NO_HZ=y
+CONFIG_NO_HZ_IDLE=y
 CONFIG_HIGH_RES_TIMERS=y
 CONFIG_BLK_DEV_INITRD=y
 CONFIG_KALLSYMS_ALL=y
 CONFIG_MODULES=y
 CONFIG_MODULE_UNLOAD=y
 # CONFIG_BLK_DEV_BSG is not set
+CONFIG_PARTITION_ADVANCED=y
 CONFIG_ARCH_U8500=y
 CONFIG_MACH_HREFV60=y
 CONFIG_MACH_SNOWBALL=y
-CONFIG_MACH_UX500_DT=y
 CONFIG_SMP=y
 CONFIG_NR_CPUS=2
 CONFIG_PREEMPT=y
@@ -34,16 +34,22 @@ CONFIG_IP_PNP=y
 CONFIG_IP_PNP_DHCP=y
 CONFIG_NETFILTER=y
 CONFIG_PHONET=y
-# CONFIG_WIRELESS is not set
+CONFIG_CFG80211=y
+CONFIG_CFG80211_DEBUGFS=y
+CONFIG_MAC80211=y
+CONFIG_MAC80211_LEDS=y
 CONFIG_CAIF=y
 CONFIG_UEVENT_HELPER_PATH="/sbin/hotplug"
+CONFIG_DEVTMPFS=y
+CONFIG_DEVTMPFS_MOUNT=y
 CONFIG_BLK_DEV_RAM=y
 CONFIG_BLK_DEV_RAM_SIZE=65536
 CONFIG_SENSORS_BH1780=y
 CONFIG_NETDEVICES=y
 CONFIG_SMSC911X=y
 CONFIG_SMSC_PHY=y
-# CONFIG_WLAN is not set
+CONFIG_CW1200=y
+CONFIG_CW1200_WLAN_SDIO=y
 # CONFIG_INPUT_MOUSEDEV_PSAUX is not set
 CONFIG_INPUT_EVDEV=y
 # CONFIG_KEYBOARD_ATKBD is not set
@@ -85,15 +91,12 @@ CONFIG_AB8500_USB=y
 CONFIG_USB_GADGET=y
 CONFIG_USB_ETH=m
 CONFIG_MMC=y
-CONFIG_MMC_UNSAFE_RESUME=y
-# CONFIG_MMC_BLOCK_BOUNCE is not set
 CONFIG_MMC_ARMMMCI=y
 CONFIG_NEW_LEDS=y
 CONFIG_LEDS_CLASS=y
 CONFIG_LEDS_LM3530=y
 CONFIG_LEDS_GPIO=y
 CONFIG_LEDS_LP5521=y
-CONFIG_LEDS_TRIGGERS=y
 CONFIG_LEDS_TRIGGER_HEARTBEAT=y
 CONFIG_RTC_CLASS=y
 CONFIG_RTC_DRV_AB8500=y
@@ -103,6 +106,11 @@ CONFIG_STE_DMA40=y
 CONFIG_STAGING=y
 CONFIG_TOUCHSCREEN_SYNAPTICS_I2C_RMI4=y
 CONFIG_HSEM_U8500=y
+CONFIG_IIO=y
+CONFIG_IIO_ST_ACCEL_3AXIS=y
+CONFIG_IIO_ST_GYRO_3AXIS=y
+CONFIG_IIO_ST_MAGN_3AXIS=y
+CONFIG_IIO_ST_PRESS=y
 CONFIG_EXT2_FS=y
 CONFIG_EXT2_FS_XATTR=y
 CONFIG_EXT2_FS_POSIX_ACL=y
@@ -110,8 +118,6 @@ CONFIG_EXT2_FS_SECURITY=y
 CONFIG_EXT3_FS=y
 CONFIG_EXT4_FS=y
 CONFIG_VFAT_FS=y
-CONFIG_DEVTMPFS=y
-CONFIG_DEVTMPFS_MOUNT=y
 CONFIG_TMPFS=y
 CONFIG_TMPFS_POSIX_ACL=y
 # CONFIG_MISC_FILESYSTEMS is not set
index c651e3b26ec703b08cea0128eb76c23d9aa94d46..4764344367d4b3a14381aca696f01d335b75d50f 100644 (file)
@@ -222,22 +222,22 @@ static inline int cpu_is_xsc3(void)
 #endif
 
 /*
- * Marvell's PJ4 core is based on V7 version. It has some modification
- * for coprocessor setting. For this reason, we need a way to distinguish
- * it.
+ * Marvell's PJ4 and PJ4B cores are based on V7 version,
+ * but require a specical sequence for enabling coprocessors.
+ * For this reason, we need a way to distinguish them.
  */
-#ifndef CONFIG_CPU_PJ4
-#define cpu_is_pj4()   0
-#else
+#if defined(CONFIG_CPU_PJ4) || defined(CONFIG_CPU_PJ4B)
 static inline int cpu_is_pj4(void)
 {
        unsigned int id;
 
        id = read_cpuid_id();
-       if ((id & 0xfffffff0) == 0x562f5840)
+       if ((id & 0xff0fff00) == 0x560f5800)
                return 1;
 
        return 0;
 }
+#else
+#define cpu_is_pj4()   0
 #endif
 #endif
index 191ada6e4d2db3393270ef9595e9cc083e441909..662c7bd061081b2fadfc0e790e719d14cc526d77 100644 (file)
                /* Select the best insn combination to perform the   */ \
                /* actual __m * __n / (__p << 64) operation.         */ \
                if (!__c) {                                             \
-                       asm (   "umull  %Q0, %R0, %1, %Q2\n\t"          \
+                       asm (   "umull  %Q0, %R0, %Q1, %Q2\n\t"         \
                                "mov    %Q0, #0"                        \
                                : "=&r" (__res)                         \
                                : "r" (__m), "r" (__n)                  \
index 608516ebabfe6111a651f3a5ca6e63c607046fab..a5ff410dcdb6a47a03a03214bcc0d66fdc33da94 100644 (file)
@@ -53,6 +53,13 @@ void mcpm_set_early_poke(unsigned cpu, unsigned cluster,
  * CPU/cluster power operations API for higher subsystems to use.
  */
 
+/**
+ * mcpm_is_available - returns whether MCPM is initialized and available
+ *
+ * This returns true or false accordingly.
+ */
+bool mcpm_is_available(void);
+
 /**
  * mcpm_cpu_power_up - make given CPU in given cluster runable
  *
index 0baf7f0d939484264b089c772112657cb9f15c75..f1a0dace3efee423e7727e143550aae06f081fd5 100644 (file)
@@ -98,15 +98,25 @@ static inline void __tlb_alloc_page(struct mmu_gather *tlb)
        }
 }
 
-static inline void tlb_flush_mmu(struct mmu_gather *tlb)
+static inline void tlb_flush_mmu_tlbonly(struct mmu_gather *tlb)
 {
        tlb_flush(tlb);
+}
+
+static inline void tlb_flush_mmu_free(struct mmu_gather *tlb)
+{
        free_pages_and_swap_cache(tlb->pages, tlb->nr);
        tlb->nr = 0;
        if (tlb->pages == tlb->local)
                __tlb_alloc_page(tlb);
 }
 
+static inline void tlb_flush_mmu(struct mmu_gather *tlb)
+{
+       tlb_flush_mmu_tlbonly(tlb);
+       tlb_flush_mmu_free(tlb);
+}
+
 static inline void
 tlb_gather_mmu(struct mmu_gather *tlb, struct mm_struct *mm, unsigned long start, unsigned long end)
 {
index fb5584d0cc050a6c55b30ff8342615a5a39a1c2f..ba94446c72d9127633de59545a3691390ecdfc5d 100644 (file)
 #define __NR_finit_module              (__NR_SYSCALL_BASE+379)
 #define __NR_sched_setattr             (__NR_SYSCALL_BASE+380)
 #define __NR_sched_getattr             (__NR_SYSCALL_BASE+381)
+#define __NR_renameat2                 (__NR_SYSCALL_BASE+382)
 
 /*
  * This may need to be greater than __NR_last_syscall+1 in order to
index a766bcbaf8adfbca3e4bb5ef4446bc5700454d7d..040619c32d68dfe4ce63726f44fcedcc9fe6b2cf 100644 (file)
@@ -79,6 +79,7 @@ obj-$(CONFIG_CPU_XSCALE)      += xscale-cp0.o
 obj-$(CONFIG_CPU_XSC3)         += xscale-cp0.o
 obj-$(CONFIG_CPU_MOHAWK)       += xscale-cp0.o
 obj-$(CONFIG_CPU_PJ4)          += pj4-cp0.o
+obj-$(CONFIG_CPU_PJ4B)         += pj4-cp0.o
 obj-$(CONFIG_IWMMXT)           += iwmmxt.o
 obj-$(CONFIG_PERF_EVENTS)      += perf_regs.o
 obj-$(CONFIG_HW_PERF_EVENTS)   += perf_event.o perf_event_cpu.o
index 166e945de832f22b603d6b0de2ca3eb92f2ec732..8f51bdcdacbbf6675933f38fb595adbdc825f4c2 100644 (file)
                CALL(sys_finit_module)
 /* 380 */      CALL(sys_sched_setattr)
                CALL(sys_sched_getattr)
+               CALL(sys_renameat2)
 #ifndef syscalls_counted
 .equ syscalls_padding, ((NR_syscalls + 3) & ~3) - NR_syscalls
 #define syscalls_counted
index f8c08839edf3053c3ee9ac884fab3f61e8ba84b3..591d6e4a64922cda2cd23531e32aa43d339c5dd3 100644 (file)
@@ -587,7 +587,7 @@ __fixup_pv_table:
        add     r6, r6, r3      @ adjust __pv_phys_pfn_offset address
        add     r7, r7, r3      @ adjust __pv_offset address
        mov     r0, r8, lsr #12 @ convert to PFN
-       str     r0, [r6, #LOW_OFFSET]   @ save computed PHYS_OFFSET to __pv_phys_pfn_offset
+       str     r0, [r6]        @ save computed PHYS_OFFSET to __pv_phys_pfn_offset
        strcc   ip, [r7, #HIGH_OFFSET]  @ save to __pv_offset high bits
        mov     r6, r3, lsr #24 @ constant for add/sub instructions
        teq     r3, r6, lsl #24 @ must be 16MiB aligned
index a08783823b32fdde6dd73d7022b042b64c321bca..2452dd1bef53b0eb719dcda0ce127c2f5ddaeec9 100644 (file)
 #include <asm/thread_info.h>
 #include <asm/asm-offsets.h>
 
-#if defined(CONFIG_CPU_PJ4)
+#if defined(CONFIG_CPU_PJ4) || defined(CONFIG_CPU_PJ4B)
 #define PJ4(code...)           code
 #define XSC(code...)
-#else
+#elif defined(CONFIG_CPU_MOHAWK) || \
+       defined(CONFIG_CPU_XSC3) || \
+       defined(CONFIG_CPU_XSCALE)
 #define PJ4(code...)
 #define XSC(code...)           code
+#else
+#error "Unsupported iWMMXt architecture"
 #endif
 
 #define MMX_WR0                        (0x00)
index f0d180d8b29f4e22558a98fdf9366ebaf1c0c0cb..8cf0996aa1a8d795bfdb65add498aa1552829382 100644 (file)
@@ -184,3 +184,10 @@ void machine_kexec(struct kimage *image)
 
        soft_restart(reboot_entry_phys);
 }
+
+void arch_crash_save_vmcoreinfo(void)
+{
+#ifdef CONFIG_ARM_LPAE
+       VMCOREINFO_CONFIG(ARM_LPAE);
+#endif
+}
index fc72086362842436381d0595c1afea648eb7b830..8153e36b24917e96c8fa69d18bd78e8b0c130c1a 100644 (file)
@@ -45,7 +45,7 @@ static int iwmmxt_do(struct notifier_block *self, unsigned long cmd, void *t)
        return NOTIFY_DONE;
 }
 
-static struct notifier_block iwmmxt_notifier_block = {
+static struct notifier_block __maybe_unused iwmmxt_notifier_block = {
        .notifier_call  = iwmmxt_do,
 };
 
@@ -72,6 +72,33 @@ static void __init pj4_cp_access_write(u32 value)
                : "=r" (temp) : "r" (value));
 }
 
+static int __init pj4_get_iwmmxt_version(void)
+{
+       u32 cp_access, wcid;
+
+       cp_access = pj4_cp_access_read();
+       pj4_cp_access_write(cp_access | 0xf);
+
+       /* check if coprocessor 0 and 1 are available */
+       if ((pj4_cp_access_read() & 0xf) != 0xf) {
+               pj4_cp_access_write(cp_access);
+               return -ENODEV;
+       }
+
+       /* read iWMMXt coprocessor id register p1, c0 */
+       __asm__ __volatile__ ("mrc    p1, 0, %0, c0, c0, 0\n" : "=r" (wcid));
+
+       pj4_cp_access_write(cp_access);
+
+       /* iWMMXt v1 */
+       if ((wcid & 0xffffff00) == 0x56051000)
+               return 1;
+       /* iWMMXt v2 */
+       if ((wcid & 0xffffff00) == 0x56052000)
+               return 2;
+
+       return -EINVAL;
+}
 
 /*
  * Disable CP0/CP1 on boot, and let call_fpe() and the iWMMXt lazy
@@ -79,17 +106,26 @@ static void __init pj4_cp_access_write(u32 value)
  */
 static int __init pj4_cp0_init(void)
 {
-       u32 cp_access;
+       u32 __maybe_unused cp_access;
+       int vers;
 
        if (!cpu_is_pj4())
                return 0;
 
+       vers = pj4_get_iwmmxt_version();
+       if (vers < 0)
+               return 0;
+
+#ifndef CONFIG_IWMMXT
+       pr_info("PJ4 iWMMXt coprocessor detected, but kernel support is missing.\n");
+#else
        cp_access = pj4_cp_access_read() & ~0xf;
        pj4_cp_access_write(cp_access);
 
-       printk(KERN_INFO "PJ4 iWMMXt coprocessor enabled.\n");
+       pr_info("PJ4 iWMMXt v%d coprocessor enabled.\n", vers);
        elf_hwcap |= HWCAP_IWMMXT;
        thread_register_notifier(&iwmmxt_notifier_block);
+#endif
 
        return 0;
 }
index 702bd329d9d0cd4f8b0912ca3a9694f942f1a568..e90a3148f38540c98c9f7a34ccce3f9ab7de7581 100644 (file)
@@ -203,9 +203,9 @@ asmlinkage long sys_oabi_fcntl64(unsigned int fd, unsigned int cmd,
        int ret;
 
        switch (cmd) {
-       case F_GETLKP:
-       case F_SETLKP:
-       case F_SETLKPW:
+       case F_OFD_GETLK:
+       case F_OFD_SETLK:
+       case F_OFD_SETLKW:
        case F_GETLK64:
        case F_SETLK64:
        case F_SETLKW64:
index 466bd299b1a8aad54949364d976d9c5430c2375e..4be5bb150bdddea694fbf71bffa6dd8e9b855177 100644 (file)
@@ -23,7 +23,7 @@ config KVM
        select HAVE_KVM_CPU_RELAX_INTERCEPT
        select KVM_MMIO
        select KVM_ARM_HOST
-       depends on ARM_VIRT_EXT && ARM_LPAE
+       depends on ARM_VIRT_EXT && ARM_LPAE && !CPU_BIG_ENDIAN
        ---help---
          Support hosting virtualized guest machines. You will also
          need to select one or more of the processor modules below.
index 80bb1e6c2c2906d0764ae5b696e72053c8faff9c..16f804938b8fea9fee56fa93991cf8c45cf141e5 100644 (file)
@@ -42,6 +42,8 @@ static unsigned long hyp_idmap_start;
 static unsigned long hyp_idmap_end;
 static phys_addr_t hyp_idmap_vector;
 
+#define pgd_order get_order(PTRS_PER_PGD * sizeof(pgd_t))
+
 #define kvm_pmd_huge(_x)       (pmd_huge(_x) || pmd_trans_huge(_x))
 
 static void kvm_tlb_flush_vmid_ipa(struct kvm *kvm, phys_addr_t ipa)
@@ -293,14 +295,14 @@ void free_boot_hyp_pgd(void)
        if (boot_hyp_pgd) {
                unmap_range(NULL, boot_hyp_pgd, hyp_idmap_start, PAGE_SIZE);
                unmap_range(NULL, boot_hyp_pgd, TRAMPOLINE_VA, PAGE_SIZE);
-               kfree(boot_hyp_pgd);
+               free_pages((unsigned long)boot_hyp_pgd, pgd_order);
                boot_hyp_pgd = NULL;
        }
 
        if (hyp_pgd)
                unmap_range(NULL, hyp_pgd, TRAMPOLINE_VA, PAGE_SIZE);
 
-       kfree(init_bounce_page);
+       free_page((unsigned long)init_bounce_page);
        init_bounce_page = NULL;
 
        mutex_unlock(&kvm_hyp_pgd_mutex);
@@ -330,7 +332,7 @@ void free_hyp_pgds(void)
                for (addr = VMALLOC_START; is_vmalloc_addr((void*)addr); addr += PGDIR_SIZE)
                        unmap_range(NULL, hyp_pgd, KERN_TO_HYP(addr), PGDIR_SIZE);
 
-               kfree(hyp_pgd);
+               free_pages((unsigned long)hyp_pgd, pgd_order);
                hyp_pgd = NULL;
        }
 
@@ -1024,7 +1026,7 @@ int kvm_mmu_init(void)
                size_t len = __hyp_idmap_text_end - __hyp_idmap_text_start;
                phys_addr_t phys_base;
 
-               init_bounce_page = kmalloc(PAGE_SIZE, GFP_KERNEL);
+               init_bounce_page = (void *)__get_free_page(GFP_KERNEL);
                if (!init_bounce_page) {
                        kvm_err("Couldn't allocate HYP init bounce page\n");
                        err = -ENOMEM;
@@ -1050,8 +1052,9 @@ int kvm_mmu_init(void)
                         (unsigned long)phys_base);
        }
 
-       hyp_pgd = kzalloc(PTRS_PER_PGD * sizeof(pgd_t), GFP_KERNEL);
-       boot_hyp_pgd = kzalloc(PTRS_PER_PGD * sizeof(pgd_t), GFP_KERNEL);
+       hyp_pgd = (pgd_t *)__get_free_pages(GFP_KERNEL | __GFP_ZERO, pgd_order);
+       boot_hyp_pgd = (pgd_t *)__get_free_pages(GFP_KERNEL | __GFP_ZERO, pgd_order);
+
        if (!hyp_pgd || !boot_hyp_pgd) {
                kvm_err("Hyp mode PGD not allocated\n");
                err = -ENOMEM;
index 8b1b0a8700259961d04617ee064eb036679f8acd..a0282928e9c10bdbc67b385423b225f8e5315756 100644 (file)
@@ -1296,7 +1296,7 @@ static struct resource adc_resources[] = {
 };
 
 static struct platform_device at91_adc_device = {
-       .name           = "at91_adc",
+       .name           = "at91sam9260-adc",
        .id             = -1,
        .dev            = {
                                .platform_data          = &adc_data,
index 77b04c2edd783485d89f229a5c9c075bc68ab468..dab362c06487a856c9bcac67dd9248903c133fcd 100644 (file)
@@ -1204,7 +1204,7 @@ static struct resource adc_resources[] = {
 };
 
 static struct platform_device at91_adc_device = {
-       .name           = "at91_adc",
+       .name           = "at91sam9g45-adc",
        .id             = -1,
        .dev            = {
                                .platform_data  = &adc_data,
index b0e7f9d2c245ff093f1f08e11672e740f5170db0..2b4d6acfa34abdd67a6c6ed9becb44345dd17929 100644 (file)
@@ -208,8 +208,8 @@ static void __init imx6q_clocks_init(struct device_node *ccm_node)
         * the "output_enable" bit as a gate, even though it's really just
         * enabling clock output.
         */
-       clk[lvds1_gate] = imx_clk_gate("lvds1_gate", "dummy", base + 0x160, 10);
-       clk[lvds2_gate] = imx_clk_gate("lvds2_gate", "dummy", base + 0x160, 11);
+       clk[lvds1_gate] = imx_clk_gate("lvds1_gate", "lvds1_sel", base + 0x160, 10);
+       clk[lvds2_gate] = imx_clk_gate("lvds2_gate", "lvds2_sel", base + 0x160, 11);
 
        /*                                name              parent_name        reg       idx */
        clk[pll2_pfd0_352m] = imx_clk_pfd("pll2_pfd0_352m", "pll2_bus",     base + 0x100, 0);
@@ -258,14 +258,14 @@ static void __init imx6q_clocks_init(struct device_node *ccm_node)
        clk[ipu2_sel]         = imx_clk_mux("ipu2_sel",         base + 0x3c, 14, 2, ipu_sels,          ARRAY_SIZE(ipu_sels));
        clk[ldb_di0_sel]      = imx_clk_mux_flags("ldb_di0_sel", base + 0x2c, 9,  3, ldb_di_sels,      ARRAY_SIZE(ldb_di_sels), CLK_SET_RATE_PARENT);
        clk[ldb_di1_sel]      = imx_clk_mux_flags("ldb_di1_sel", base + 0x2c, 12, 3, ldb_di_sels,      ARRAY_SIZE(ldb_di_sels), CLK_SET_RATE_PARENT);
-       clk[ipu1_di0_pre_sel] = imx_clk_mux("ipu1_di0_pre_sel", base + 0x34, 6,  3, ipu_di_pre_sels,   ARRAY_SIZE(ipu_di_pre_sels));
-       clk[ipu1_di1_pre_sel] = imx_clk_mux("ipu1_di1_pre_sel", base + 0x34, 15, 3, ipu_di_pre_sels,   ARRAY_SIZE(ipu_di_pre_sels));
-       clk[ipu2_di0_pre_sel] = imx_clk_mux("ipu2_di0_pre_sel", base + 0x38, 6,  3, ipu_di_pre_sels,   ARRAY_SIZE(ipu_di_pre_sels));
-       clk[ipu2_di1_pre_sel] = imx_clk_mux("ipu2_di1_pre_sel", base + 0x38, 15, 3, ipu_di_pre_sels,   ARRAY_SIZE(ipu_di_pre_sels));
-       clk[ipu1_di0_sel]     = imx_clk_mux("ipu1_di0_sel",     base + 0x34, 0,  3, ipu1_di0_sels,     ARRAY_SIZE(ipu1_di0_sels));
-       clk[ipu1_di1_sel]     = imx_clk_mux("ipu1_di1_sel",     base + 0x34, 9,  3, ipu1_di1_sels,     ARRAY_SIZE(ipu1_di1_sels));
-       clk[ipu2_di0_sel]     = imx_clk_mux("ipu2_di0_sel",     base + 0x38, 0,  3, ipu2_di0_sels,     ARRAY_SIZE(ipu2_di0_sels));
-       clk[ipu2_di1_sel]     = imx_clk_mux("ipu2_di1_sel",     base + 0x38, 9,  3, ipu2_di1_sels,     ARRAY_SIZE(ipu2_di1_sels));
+       clk[ipu1_di0_pre_sel] = imx_clk_mux_flags("ipu1_di0_pre_sel", base + 0x34, 6,  3, ipu_di_pre_sels,   ARRAY_SIZE(ipu_di_pre_sels), CLK_SET_RATE_PARENT);
+       clk[ipu1_di1_pre_sel] = imx_clk_mux_flags("ipu1_di1_pre_sel", base + 0x34, 15, 3, ipu_di_pre_sels,   ARRAY_SIZE(ipu_di_pre_sels), CLK_SET_RATE_PARENT);
+       clk[ipu2_di0_pre_sel] = imx_clk_mux_flags("ipu2_di0_pre_sel", base + 0x38, 6,  3, ipu_di_pre_sels,   ARRAY_SIZE(ipu_di_pre_sels), CLK_SET_RATE_PARENT);
+       clk[ipu2_di1_pre_sel] = imx_clk_mux_flags("ipu2_di1_pre_sel", base + 0x38, 15, 3, ipu_di_pre_sels,   ARRAY_SIZE(ipu_di_pre_sels), CLK_SET_RATE_PARENT);
+       clk[ipu1_di0_sel]     = imx_clk_mux_flags("ipu1_di0_sel",     base + 0x34, 0,  3, ipu1_di0_sels,     ARRAY_SIZE(ipu1_di0_sels), CLK_SET_RATE_PARENT);
+       clk[ipu1_di1_sel]     = imx_clk_mux_flags("ipu1_di1_sel",     base + 0x34, 9,  3, ipu1_di1_sels,     ARRAY_SIZE(ipu1_di1_sels), CLK_SET_RATE_PARENT);
+       clk[ipu2_di0_sel]     = imx_clk_mux_flags("ipu2_di0_sel",     base + 0x38, 0,  3, ipu2_di0_sels,     ARRAY_SIZE(ipu2_di0_sels), CLK_SET_RATE_PARENT);
+       clk[ipu2_di1_sel]     = imx_clk_mux_flags("ipu2_di1_sel",     base + 0x38, 9,  3, ipu2_di1_sels,     ARRAY_SIZE(ipu2_di1_sels), CLK_SET_RATE_PARENT);
        clk[hsi_tx_sel]       = imx_clk_mux("hsi_tx_sel",       base + 0x30, 28, 1, hsi_tx_sels,       ARRAY_SIZE(hsi_tx_sels));
        clk[pcie_axi_sel]     = imx_clk_mux("pcie_axi_sel",     base + 0x18, 10, 1, pcie_axi_sels,     ARRAY_SIZE(pcie_axi_sels));
        clk[ssi1_sel]         = imx_clk_fixup_mux("ssi1_sel",   base + 0x1c, 10, 2, ssi_sels,          ARRAY_SIZE(ssi_sels),          imx_cscmr1_fixup);
@@ -445,6 +445,15 @@ static void __init imx6q_clocks_init(struct device_node *ccm_node)
                clk_set_parent(clk[ldb_di1_sel], clk[pll5_video_div]);
        }
 
+       clk_set_parent(clk[ipu1_di0_pre_sel], clk[pll5_video_div]);
+       clk_set_parent(clk[ipu1_di1_pre_sel], clk[pll5_video_div]);
+       clk_set_parent(clk[ipu2_di0_pre_sel], clk[pll5_video_div]);
+       clk_set_parent(clk[ipu2_di1_pre_sel], clk[pll5_video_div]);
+       clk_set_parent(clk[ipu1_di0_sel], clk[ipu1_di0_pre]);
+       clk_set_parent(clk[ipu1_di1_sel], clk[ipu1_di1_pre]);
+       clk_set_parent(clk[ipu2_di0_sel], clk[ipu2_di0_pre]);
+       clk_set_parent(clk[ipu2_di1_sel], clk[ipu2_di1_pre]);
+
        /*
         * The gpmi needs 100MHz frequency in the EDO/Sync mode,
         * We can not get the 100MHz from the pll2_pfd0_352m.
index 43a90c8d68375594bb97d3a904021b534e217805..9cfebc5c7455b36b1123ae100efa513bd8fdbcdf 100644 (file)
@@ -48,7 +48,7 @@ static struct omap_dss_board_info rx51_dss_board_info = {
 
 static int __init rx51_video_init(void)
 {
-       if (!machine_is_nokia_rx51() && !of_machine_is_compatible("nokia,omap3-n900"))
+       if (!machine_is_nokia_rx51())
                return 0;
 
        if (omap_mux_init_gpio(RX51_LCD_RESET_GPIO, OMAP_PIN_OUTPUT)) {
index 2649ce445845288725c011bd66147cc103018339..332af927f4d3460f5852b3878279986ef965b675 100644 (file)
@@ -209,7 +209,7 @@ u8 omap2_init_dpll_parent(struct clk_hw *hw)
                if (v == OMAP3XXX_EN_DPLL_LPBYPASS ||
                    v == OMAP3XXX_EN_DPLL_FRBYPASS)
                        return 1;
-       } else if (soc_is_am33xx() || cpu_is_omap44xx()) {
+       } else if (soc_is_am33xx() || cpu_is_omap44xx() || soc_is_am43xx()) {
                if (v == OMAP4XXX_EN_DPLL_LPBYPASS ||
                    v == OMAP4XXX_EN_DPLL_FRBYPASS ||
                    v == OMAP4XXX_EN_DPLL_MNBYPASS)
@@ -255,7 +255,7 @@ unsigned long omap2_get_dpll_rate(struct clk_hw_omap *clk)
                if (v == OMAP3XXX_EN_DPLL_LPBYPASS ||
                    v == OMAP3XXX_EN_DPLL_FRBYPASS)
                        return __clk_get_rate(dd->clk_bypass);
-       } else if (soc_is_am33xx() || cpu_is_omap44xx()) {
+       } else if (soc_is_am33xx() || cpu_is_omap44xx() || soc_is_am43xx()) {
                if (v == OMAP4XXX_EN_DPLL_LPBYPASS ||
                    v == OMAP4XXX_EN_DPLL_FRBYPASS ||
                    v == OMAP4XXX_EN_DPLL_MNBYPASS)
index ab43755364f5a7c06ecfc367c0f055d65876dc54..9fe8c949305c3aff97626ca3edd5268696d57a8f 100644 (file)
@@ -501,7 +501,7 @@ static int gpmc_cs_delete_mem(int cs)
        int r;
 
        spin_lock(&gpmc_mem_lock);
-       r = release_resource(&gpmc_cs_mem[cs]);
+       r = release_resource(res);
        res->start = 0;
        res->end = 0;
        spin_unlock(&gpmc_mem_lock);
@@ -527,6 +527,14 @@ static int gpmc_cs_remap(int cs, u32 base)
                pr_err("%s: requested chip-select is disabled\n", __func__);
                return -ENODEV;
        }
+
+       /*
+        * Make sure we ignore any device offsets from the GPMC partition
+        * allocated for the chip select and that the new base confirms
+        * to the GPMC 16MB minimum granularity.
+        */ 
+       base &= ~(SZ_16M - 1);
+
        gpmc_cs_get_memconf(cs, &old_base, &size);
        if (base == old_base)
                return 0;
@@ -586,6 +594,8 @@ EXPORT_SYMBOL(gpmc_cs_request);
 
 void gpmc_cs_free(int cs)
 {
+       struct resource *res = &gpmc_cs_mem[cs];
+
        spin_lock(&gpmc_mem_lock);
        if (cs >= gpmc_cs_num || cs < 0 || !gpmc_cs_reserved(cs)) {
                printk(KERN_ERR "Trying to free non-reserved GPMC CS%d\n", cs);
@@ -594,7 +604,8 @@ void gpmc_cs_free(int cs)
                return;
        }
        gpmc_cs_disable_mem(cs);
-       release_resource(&gpmc_cs_mem[cs]);
+       if (res->flags)
+               release_resource(res);
        gpmc_cs_set_reserved(cs, 0);
        spin_unlock(&gpmc_mem_lock);
 }
index 1f33f5db10d5a2dde0d90bd29f1eb6fce9b65995..66c60fe1104c9efabd02e99b8b5ca7755e9738ce 100644 (file)
@@ -2546,11 +2546,12 @@ static int __init _init(struct omap_hwmod *oh, void *data)
                return -EINVAL;
        }
 
-       if (np)
+       if (np) {
                if (of_find_property(np, "ti,no-reset-on-init", NULL))
                        oh->flags |= HWMOD_INIT_NO_RESET;
                if (of_find_property(np, "ti,no-idle-on-init", NULL))
                        oh->flags |= HWMOD_INIT_NO_IDLE;
+       }
 
        oh->_state = _HWMOD_STATE_INITIALIZED;
 
index a123ff0070bd65138394fa6248f611ef2cc10573..71ac7d5f338593e4e7f27ed8b4cb81465b28b7db 100644 (file)
@@ -1964,7 +1964,7 @@ static struct omap_hwmod_irq_info omap3xxx_usb_host_hs_irqs[] = {
 static struct omap_hwmod omap3xxx_usb_host_hs_hwmod = {
        .name           = "usb_host_hs",
        .class          = &omap3xxx_usb_host_hs_hwmod_class,
-       .clkdm_name     = "l3_init_clkdm",
+       .clkdm_name     = "usbhost_clkdm",
        .mpu_irqs       = omap3xxx_usb_host_hs_irqs,
        .main_clk       = "usbhost_48m_fck",
        .prcm = {
@@ -2047,7 +2047,7 @@ static struct omap_hwmod_irq_info omap3xxx_usb_tll_hs_irqs[] = {
 static struct omap_hwmod omap3xxx_usb_tll_hs_hwmod = {
        .name           = "usb_tll_hs",
        .class          = &omap3xxx_usb_tll_hs_hwmod_class,
-       .clkdm_name     = "l3_init_clkdm",
+       .clkdm_name     = "core_l4_clkdm",
        .mpu_irqs       = omap3xxx_usb_tll_hs_irqs,
        .main_clk       = "usbtll_fck",
        .prcm = {
index 1f3770a8a7286fd7650f76d46917408d0ff52b96..87099bb6de692771ce7d26a720f0bec5836d6beb 100644 (file)
@@ -330,10 +330,6 @@ void omap_sram_idle(void)
                        omap3_sram_restore_context();
                        omap2_sms_restore_context();
                }
-               if (core_next_state == PWRDM_POWER_OFF)
-                       omap2_prm_clear_mod_reg_bits(OMAP3430_AUTO_OFF_MASK,
-                                              OMAP3430_GR_MOD,
-                                              OMAP3_PRM_VOLTCTRL_OFFSET);
        }
        omap3_intc_resume_idle();
 
index 8bc02913517cd14a6e96f05295ff94f9fd250ee3..0e1bb46264f9c1bed329538a01088c27903a37ed 100644 (file)
@@ -14,6 +14,7 @@
 
 #include <linux/gpio.h>
 #include <linux/mfd/asic3.h>
+#include "irqs.h" /* PXA_NR_BUILTIN_GPIO */
 
 #define HX4700_ASIC3_GPIO_BASE PXA_NR_BUILTIN_GPIO
 #define HX4700_EGPIO_BASE      (HX4700_ASIC3_GPIO_BASE + ASIC3_NUM_GPIOS)
index dbfa5a26cfff85b1ffdac6afdcc7ab26d21c0177..072842f6491b8efb0bc0ac7e3d3b04e3d6ffaefe 100644 (file)
@@ -152,7 +152,7 @@ static void __init rockchip_smp_prepare_cpus(unsigned int max_cpus)
 
        node = of_find_compatible_node(NULL, NULL, "rockchip,rk3066-pmu");
        if (!node) {
-               pr_err("%s: could not find sram dt node\n", __func__);
+               pr_err("%s: could not find pmu dt node\n", __func__);
                return;
        }
 
index 2858f380beaefba938f6dbdf75ec81874af62168..486063db2a2ffd501ca67cf7d62f0e0750464010 100644 (file)
@@ -992,6 +992,7 @@ static struct asoc_simple_card_info fsi_wm8978_info = {
        .platform       = "sh_fsi2",
        .daifmt         = SND_SOC_DAIFMT_I2S | SND_SOC_DAIFMT_CBM_CFM,
        .cpu_dai = {
+               .fmt    = SND_SOC_DAIFMT_IB_NF,
                .name   = "fsia-dai",
        },
        .codec_dai = {
index f0104bfe544e378c6a778d853fbd3fc822669087..18c7e0311aa679c60a634963a3f64ddf7484dffd 100644 (file)
@@ -588,14 +588,12 @@ static struct asoc_simple_card_info rsnd_card_info = {
        .card           = "SSI01-AK4643",
        .codec          = "ak4642-codec.2-0012",
        .platform       = "rcar_sound",
-       .daifmt         = SND_SOC_DAIFMT_LEFT_J,
+       .daifmt         = SND_SOC_DAIFMT_LEFT_J | SND_SOC_DAIFMT_CBM_CFM,
        .cpu_dai = {
                .name   = "rcar_sound",
-               .fmt    = SND_SOC_DAIFMT_CBS_CFS,
        },
        .codec_dai = {
                .name   = "ak4642-hifi",
-               .fmt    = SND_SOC_DAIFMT_CBM_CFM,
                .sysclk = 11289600,
        },
 };
index 2009a9bc63562af9d761c47f433a4b3d6a80f25d..9989b1b06ffd7dae363552e4d1b973f99f423f0a 100644 (file)
@@ -170,7 +170,7 @@ static struct clk mstp_clks[MSTP_NR] = {
        [MSTP010] = SH_CLK_MSTP32(&p_clk, MSTPCR0, 10, 0), /* SSI2 */
        [MSTP009] = SH_CLK_MSTP32(&p_clk, MSTPCR0,  9, 0), /* SSI3 */
        [MSTP008] = SH_CLK_MSTP32(&p_clk, MSTPCR0,  8, 0), /* SRU */
-       [MSTP007] = SH_CLK_MSTP32(&p_clk, MSTPCR0,  7, 0), /* HSPI */
+       [MSTP007] = SH_CLK_MSTP32(&s_clk, MSTPCR0,  7, 0), /* HSPI */
 };
 
 static struct clk_lookup lookups[] = {
index 64790353951f0c8ca2d071d1c2ba199007fa18d3..26fda4ed4d51413301d148477cd4d18e2bef7884 100644 (file)
@@ -71,7 +71,7 @@ static void clockevent_set_mode(enum clock_event_mode mode,
 static int clockevent_next_event(unsigned long evt,
                                 struct clock_event_device *clk_event_dev);
 
-static void spear_clocksource_init(void)
+static void __init spear_clocksource_init(void)
 {
        u32 tick_rate;
        u16 val;
index 92d660f9610f4ca94092a81749e578372731939f..55b305d51669c576d7b85f6d9fa07f45739ab644 100644 (file)
@@ -70,7 +70,4 @@ config TEGRA_AHB
          which controls AHB bus master arbitration and some
          performance parameters(priority, prefech size).
 
-config TEGRA_EMC_SCALING_ENABLE
-       bool "Enable scaling the memory frequency"
-
 endmenu
index 788495d35cf9ea6d920a69a8fd6cc2a7b46fd7c0..30b993399ed7758062f1f458b05d3fee2c9cd7b8 100644 (file)
@@ -51,12 +51,14 @@ static int dcscb_allcpus_mask[2];
 static int dcscb_power_up(unsigned int cpu, unsigned int cluster)
 {
        unsigned int rst_hold, cpumask = (1 << cpu);
-       unsigned int all_mask = dcscb_allcpus_mask[cluster];
+       unsigned int all_mask;
 
        pr_debug("%s: cpu %u cluster %u\n", __func__, cpu, cluster);
        if (cpu >= 4 || cluster >= 2)
                return -EINVAL;
 
+       all_mask = dcscb_allcpus_mask[cluster];
+
        /*
         * Since this is called with IRQs enabled, and no arch_spin_lock_irq
         * variant exists, we need to disable IRQs manually here.
@@ -101,11 +103,12 @@ static void dcscb_power_down(void)
        cpu = MPIDR_AFFINITY_LEVEL(mpidr, 0);
        cluster = MPIDR_AFFINITY_LEVEL(mpidr, 1);
        cpumask = (1 << cpu);
-       all_mask = dcscb_allcpus_mask[cluster];
 
        pr_debug("%s: cpu %u cluster %u\n", __func__, cpu, cluster);
        BUG_ON(cpu >= 4 || cluster >= 2);
 
+       all_mask = dcscb_allcpus_mask[cluster];
+
        __mcpm_cpu_going_down(cpu, cluster);
 
        arch_spin_lock(&dcscb_lock);
index c26ef5b92ca78587ce35b0f597a9cea66f9d592a..2c2754e79cb37d3fbcd9aff04ca086e4ba6f5274 100644 (file)
@@ -392,7 +392,7 @@ static irqreturn_t ve_spc_irq_handler(int irq, void *data)
  *  +--------------------------+
  *  | 31      20 | 19        0 |
  *  +--------------------------+
- *  |   u_volt   |  freq(kHz)  |
+ *  |   m_volt   |  freq(kHz)  |
  *  +--------------------------+
  */
 #define MULT_FACTOR    20
@@ -414,7 +414,7 @@ static int ve_spc_populate_opps(uint32_t cluster)
                ret = ve_spc_read_sys_cfg(SYSCFG_SCC, off, &data);
                if (!ret) {
                        opps->freq = (data & FREQ_MASK) * MULT_FACTOR;
-                       opps->u_volt = data >> VOLT_SHIFT;
+                       opps->u_volt = (data >> VOLT_SHIFT) * 1000;
                } else {
                        break;
                }
index f5ad9ee70426b0f2a285cc463739a01173994875..5bf7c3c3b3018aa37a721a222714873bec8b92ff 100644 (file)
@@ -420,29 +420,29 @@ config CPU_32v3
        bool
        select CPU_USE_DOMAINS if MMU
        select NEEDS_SYSCALL_FOR_CMPXCHG if SMP
-       select TLS_REG_EMUL if SMP || !MMU
        select NEED_KUSER_HELPERS
+       select TLS_REG_EMUL if SMP || !MMU
 
 config CPU_32v4
        bool
        select CPU_USE_DOMAINS if MMU
        select NEEDS_SYSCALL_FOR_CMPXCHG if SMP
-       select TLS_REG_EMUL if SMP || !MMU
        select NEED_KUSER_HELPERS
+       select TLS_REG_EMUL if SMP || !MMU
 
 config CPU_32v4T
        bool
        select CPU_USE_DOMAINS if MMU
        select NEEDS_SYSCALL_FOR_CMPXCHG if SMP
-       select TLS_REG_EMUL if SMP || !MMU
        select NEED_KUSER_HELPERS
+       select TLS_REG_EMUL if SMP || !MMU
 
 config CPU_32v5
        bool
        select CPU_USE_DOMAINS if MMU
        select NEEDS_SYSCALL_FOR_CMPXCHG if SMP
-       select TLS_REG_EMUL if SMP || !MMU
        select NEED_KUSER_HELPERS
+       select TLS_REG_EMUL if SMP || !MMU
 
 config CPU_32v6
        bool
index f62aa0677e5c4b69918d1ab36e39fada230d3d59..6b00be1f971e15958cc40c369c88ca872f645aa6 100644 (file)
@@ -1963,8 +1963,8 @@ arm_iommu_create_mapping(struct bus_type *bus, dma_addr_t base, size_t size)
        mapping->nr_bitmaps = 1;
        mapping->extensions = extensions;
        mapping->base = base;
-       mapping->size = bitmap_size << PAGE_SHIFT;
        mapping->bits = BITS_PER_BYTE * bitmap_size;
+       mapping->size = mapping->bits << PAGE_SHIFT;
 
        spin_lock_init(&mapping->lock);
 
index 6cac43bd1d86c63638993bafa8ba989fea52e91f..423f56dd40283c3f7615a5aec0c1d5422313d61a 100644 (file)
@@ -866,6 +866,8 @@ vfp_double_multiply_accumulate(int dd, int dn, int dm, u32 fpscr, u32 negate, ch
                vdp.sign = vfp_sign_negate(vdp.sign);
 
        vfp_double_unpack(&vdn, vfp_get_double(dd));
+       if (vdn.exponent == 0 && vdn.significand)
+               vfp_double_normalise_denormal(&vdn);
        if (negate & NEG_SUBTRACT)
                vdn.sign = vfp_sign_negate(vdn.sign);
 
index b252631b406bd22bcbe040ede3d3f4dfbea8e987..4f96c1617aaec257a68496464aacbafe1f8d7ab8 100644 (file)
@@ -915,6 +915,8 @@ vfp_single_multiply_accumulate(int sd, int sn, s32 m, u32 fpscr, u32 negate, cha
        v = vfp_get_float(sd);
        pr_debug("VFP: s%u = %08x\n", sd, v);
        vfp_single_unpack(&vsn, v);
+       if (vsn.exponent == 0 && vsn.significand)
+               vfp_single_normalise_denormal(&vsn);
        if (negate & NEG_SUBTRACT)
                vsn.sign = vfp_sign_negate(vsn.sign);
 
index e6e4d3749a6e9d1eef343ec1f502c265a17c8e6d..e759af5d70988ea27959db20c7ef510a621ad335 100644 (file)
@@ -323,8 +323,6 @@ menu "CPU Power Management"
 
 source "drivers/cpuidle/Kconfig"
 
-source "kernel/power/Kconfig"
-
 source "drivers/cpufreq/Kconfig"
 
 endmenu
index 93f4b2dd92484863e8015da4a622a0c17745de5a..f8c40a66e65ddb3d3a4c327379d0eec2b234ce50 100644 (file)
                              <0x0 0x1f21e000 0x0 0x1000>,
                              <0x0 0x1f217000 0x0 0x1000>;
                        interrupts = <0x0 0x86 0x4>;
+                       dma-coherent;
                        status = "disabled";
                        clocks = <&sata01clk 0>;
                        phys = <&phy1 0>;
                              <0x0 0x1f22e000 0x0 0x1000>,
                              <0x0 0x1f227000 0x0 0x1000>;
                        interrupts = <0x0 0x87 0x4>;
+                       dma-coherent;
                        status = "ok";
                        clocks = <&sata23clk 0>;
                        phys = <&phy2 0>;
                              <0x0 0x1f23d000 0x0 0x1000>,
                              <0x0 0x1f23e000 0x0 0x1000>;
                        interrupts = <0x0 0x88 0x4>;
+                       dma-coherent;
                        status = "ok";
                        clocks = <&sata45clk 0>;
                        phys = <&phy3 0>;
index f600d400c07d2cb7e615a13bbe9d02fe3959714d..aff0292c8f4da75957ed0f3bee43ab1f804ae862 100644 (file)
@@ -22,6 +22,9 @@ typedef struct {
        void *vdso;
 } mm_context_t;
 
+#define INIT_MM_CONTEXT(name) \
+       .context.id_lock = __RAW_SPIN_LOCK_UNLOCKED(name.context.id_lock),
+
 #define ASID(mm)       ((mm)->context.id & 0xffff)
 
 extern void paging_init(void);
index 72cadf52ca807f181261b1599b25944374de5544..80e2c08900d68c0e0345fe0d831e6d1070515064 100644 (file)
@@ -19,6 +19,7 @@
 #ifndef __ASM_TLB_H
 #define __ASM_TLB_H
 
+#define  __tlb_remove_pmd_tlb_entry __tlb_remove_pmd_tlb_entry
 
 #include <asm-generic/tlb.h>
 
@@ -99,5 +100,10 @@ static inline void __pmd_free_tlb(struct mmu_gather *tlb, pmd_t *pmdp,
 }
 #endif
 
+static inline void __tlb_remove_pmd_tlb_entry(struct mmu_gather *tlb, pmd_t *pmdp,
+                                               unsigned long address)
+{
+       tlb_add_flush(tlb, address);
+}
 
 #endif
index bb8eb8a78e67d2c7906f40aa0f4db2ef0c4ef5f5..c8d8fc17bd5a6bb6def9c878acae69fbb7bee359 100644 (file)
@@ -403,8 +403,9 @@ __SYSCALL(378, sys_kcmp)
 __SYSCALL(379, sys_finit_module)
 __SYSCALL(380, sys_sched_setattr)
 __SYSCALL(381, sys_sched_getattr)
+__SYSCALL(382, sys_renameat2)
 
-#define __NR_compat_syscalls           379
+#define __NR_compat_syscalls           383
 
 /*
  * Compat syscall numbers used by the AArch64 kernel.
index ed3955a95747286ebcb3f705c107dc1b3af90423..a7fb874b595edc0c095430792de9c8883590855f 100644 (file)
@@ -318,9 +318,6 @@ static int brk_handler(unsigned long addr, unsigned int esr,
        if (call_break_hook(regs, esr) == DBG_HOOK_HANDLED)
                return 0;
 
-       pr_warn("unexpected brk exception at %lx, esr=0x%x\n",
-                       (long)instruction_pointer(regs), esr);
-
        if (!user_mode(regs))
                return -EFAULT;
 
index ffbbdde7aba10480c12b41d552d1fb41da6097df..2dc36d00addffad4a4bd10ef0a6b1bac21170a49 100644 (file)
@@ -143,10 +143,8 @@ static int __init setup_early_printk(char *buf)
        }
        /* no options parsing yet */
 
-       if (paddr) {
-               set_fixmap_io(FIX_EARLYCON_MEM_BASE, paddr);
-               early_base = (void __iomem *)fix_to_virt(FIX_EARLYCON_MEM_BASE);
-       }
+       if (paddr)
+               early_base = (void __iomem *)set_fixmap_offset_io(FIX_EARLYCON_MEM_BASE, paddr);
 
        printch = match->printch;
        early_console = &early_console_dev;
index 720853f70b6bab01a650e39548872472bcfff0b0..7ec784653b29fad2b1eba1c49a21e9e499a6c167 100644 (file)
@@ -393,11 +393,10 @@ void __init setup_arch(char **cmdline_p)
 
 static int __init arm64_device_init(void)
 {
-       of_clk_init(NULL);
        of_platform_populate(NULL, of_default_bus_match_table, NULL, NULL);
        return 0;
 }
-arch_initcall(arm64_device_init);
+arch_initcall_sync(arm64_device_init);
 
 static DEFINE_PER_CPU(struct cpu, cpu_data);
 
index 29c39d5d77e31983d49ff2754b0413cd5bd48ff8..6815987b50f822af8ff1e8a8c4f8605fc83a6a7e 100644 (file)
@@ -33,6 +33,7 @@
 #include <linux/irq.h>
 #include <linux/delay.h>
 #include <linux/clocksource.h>
+#include <linux/clk-provider.h>
 
 #include <clocksource/arm_arch_timer.h>
 
@@ -65,6 +66,7 @@ void __init time_init(void)
 {
        u32 arch_timer_rate;
 
+       of_clk_init(NULL);
        clocksource_of_init();
 
        arch_timer_rate = arch_timer_get_rate();
index 0ba347e59f06a7dbfe3fe7dcc884f9435c791d6e..c851eb44dc505f8b250b7e1205b1a5ccb35afc8c 100644 (file)
 #include <linux/slab.h>
 #include <linux/dma-mapping.h>
 #include <linux/dma-contiguous.h>
+#include <linux/of.h>
+#include <linux/platform_device.h>
 #include <linux/vmalloc.h>
 #include <linux/swiotlb.h>
+#include <linux/amba/bus.h>
 
 #include <asm/cacheflush.h>
 
@@ -305,17 +308,45 @@ struct dma_map_ops coherent_swiotlb_dma_ops = {
 };
 EXPORT_SYMBOL(coherent_swiotlb_dma_ops);
 
+static int dma_bus_notifier(struct notifier_block *nb,
+                           unsigned long event, void *_dev)
+{
+       struct device *dev = _dev;
+
+       if (event != BUS_NOTIFY_ADD_DEVICE)
+               return NOTIFY_DONE;
+
+       if (of_property_read_bool(dev->of_node, "dma-coherent"))
+               set_dma_ops(dev, &coherent_swiotlb_dma_ops);
+
+       return NOTIFY_OK;
+}
+
+static struct notifier_block platform_bus_nb = {
+       .notifier_call = dma_bus_notifier,
+};
+
+static struct notifier_block amba_bus_nb = {
+       .notifier_call = dma_bus_notifier,
+};
+
 extern int swiotlb_late_init_with_default_size(size_t default_size);
 
 static int __init swiotlb_late_init(void)
 {
        size_t swiotlb_size = min(SZ_64M, MAX_ORDER_NR_PAGES << PAGE_SHIFT);
 
-       dma_ops = &coherent_swiotlb_dma_ops;
+       /*
+        * These must be registered before of_platform_populate().
+        */
+       bus_register_notifier(&platform_bus_type, &platform_bus_nb);
+       bus_register_notifier(&amba_bustype, &amba_bus_nb);
+
+       dma_ops = &noncoherent_swiotlb_dma_ops;
 
        return swiotlb_late_init_with_default_size(swiotlb_size);
 }
-subsys_initcall(swiotlb_late_init);
+arch_initcall(swiotlb_late_init);
 
 #define PREALLOC_DMA_DEBUG_ENTRIES     4096
 
index 6b7e89569a3a9ff8518e7c6ee856603f1e9fb93b..0a472c41a67fa9dc33e9c746c24a402d6f306289 100644 (file)
@@ -374,6 +374,9 @@ int kern_addr_valid(unsigned long addr)
        if (pmd_none(*pmd))
                return 0;
 
+       if (pmd_sect(*pmd))
+               return pfn_valid(pmd_pfn(*pmd));
+
        pte = pte_offset_kernel(pmd, addr);
        if (pte_none(*pte))
                return 0;
diff --git a/arch/hexagon/include/asm/barrier.h b/arch/hexagon/include/asm/barrier.h
deleted file mode 100644 (file)
index 4e863da..0000000
+++ /dev/null
@@ -1,37 +0,0 @@
-/*
- * Memory barrier definitions for the Hexagon architecture
- *
- * Copyright (c) 2010-2011, The Linux Foundation. All rights reserved.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 and
- * only version 2 as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
- * 02110-1301, USA.
- */
-
-#ifndef _ASM_BARRIER_H
-#define _ASM_BARRIER_H
-
-#define rmb()                          barrier()
-#define read_barrier_depends()         barrier()
-#define wmb()                          barrier()
-#define mb()                           barrier()
-#define smp_rmb()                      barrier()
-#define smp_read_barrier_depends()     barrier()
-#define smp_wmb()                      barrier()
-#define smp_mb()                       barrier()
-
-/*  Set a value and use a memory barrier.  Used by the scheduler somewhere.  */
-#define set_mb(var, value) \
-       do { var = value; mb(); } while (0)
-
-#endif /* _ASM_BARRIER_H */
index bc5efc7c3f3f8ead3608780ba5e2f5b9e212e20c..39d64e0df1de6dd62caf650fdb3ed5f969f280cc 100644 (file)
@@ -91,18 +91,9 @@ extern struct ia64_tr_entry *ia64_idtrs[NR_CPUS];
 #define RR_RID_MASK    0x00000000ffffff00L
 #define RR_TO_RID(val)         ((val >> 8) & 0xffffff)
 
-/*
- * Flush the TLB for address range START to END and, if not in fast mode, release the
- * freed pages that where gathered up to this point.
- */
 static inline void
-ia64_tlb_flush_mmu (struct mmu_gather *tlb, unsigned long start, unsigned long end)
+ia64_tlb_flush_mmu_tlbonly(struct mmu_gather *tlb, unsigned long start, unsigned long end)
 {
-       unsigned long i;
-       unsigned int nr;
-
-       if (!tlb->need_flush)
-               return;
        tlb->need_flush = 0;
 
        if (tlb->fullmm) {
@@ -135,6 +126,14 @@ ia64_tlb_flush_mmu (struct mmu_gather *tlb, unsigned long start, unsigned long e
                flush_tlb_range(&vma, ia64_thash(start), ia64_thash(end));
        }
 
+}
+
+static inline void
+ia64_tlb_flush_mmu_free(struct mmu_gather *tlb)
+{
+       unsigned long i;
+       unsigned int nr;
+
        /* lastly, release the freed pages */
        nr = tlb->nr;
 
@@ -144,6 +143,19 @@ ia64_tlb_flush_mmu (struct mmu_gather *tlb, unsigned long start, unsigned long e
                free_page_and_swap_cache(tlb->pages[i]);
 }
 
+/*
+ * Flush the TLB for address range START to END and, if not in fast mode, release the
+ * freed pages that where gathered up to this point.
+ */
+static inline void
+ia64_tlb_flush_mmu (struct mmu_gather *tlb, unsigned long start, unsigned long end)
+{
+       if (!tlb->need_flush)
+               return;
+       ia64_tlb_flush_mmu_tlbonly(tlb, start, end);
+       ia64_tlb_flush_mmu_free(tlb);
+}
+
 static inline void __tlb_alloc_page(struct mmu_gather *tlb)
 {
        unsigned long addr = __get_free_pages(GFP_NOWAIT | __GFP_NOWARN, 0);
@@ -206,6 +218,16 @@ static inline int __tlb_remove_page(struct mmu_gather *tlb, struct page *page)
        return tlb->max - tlb->nr;
 }
 
+static inline void tlb_flush_mmu_tlbonly(struct mmu_gather *tlb)
+{
+       ia64_tlb_flush_mmu_tlbonly(tlb, tlb->start_addr, tlb->end_addr);
+}
+
+static inline void tlb_flush_mmu_free(struct mmu_gather *tlb)
+{
+       ia64_tlb_flush_mmu_free(tlb);
+}
+
 static inline void tlb_flush_mmu(struct mmu_gather *tlb)
 {
        ia64_tlb_flush_mmu(tlb, tlb->start_addr, tlb->end_addr);
index c2bb4f896ce788cbba4b6c48cd18ef875e5bdfd6..3aa5b46b2d40d0c7549142072debd16e1667ad81 100644 (file)
@@ -635,7 +635,7 @@ static void octeon_irq_cpu_offline_ciu(struct irq_data *data)
                cpumask_clear(&new_affinity);
                cpumask_set_cpu(cpumask_first(cpu_online_mask), &new_affinity);
        }
-       __irq_set_affinity_locked(data, &new_affinity);
+       irq_set_affinity_locked(data, &new_affinity, false);
 }
 
 static int octeon_irq_ciu_set_affinity(struct irq_data *data,
index a580642555b6f0e7f087ade117ce062cb303d429..348356c99514f0cdfb8876b9f22c0464ab8e3734 100644 (file)
@@ -1,6 +1,8 @@
 # UAPI Header export list
 include include/uapi/asm-generic/Kbuild.asm
 
+generic-y += resource.h
+
 header-y += bitsperlong.h
 header-y += byteorder.h
 header-y += errno.h
@@ -13,7 +15,6 @@ header-y += msgbuf.h
 header-y += pdc.h
 header-y += posix_types.h
 header-y += ptrace.h
-header-y += resource.h
 header-y += sembuf.h
 header-y += setup.h
 header-y += shmbuf.h
diff --git a/arch/parisc/include/uapi/asm/resource.h b/arch/parisc/include/uapi/asm/resource.h
deleted file mode 100644 (file)
index 8b06343..0000000
+++ /dev/null
@@ -1,7 +0,0 @@
-#ifndef _ASM_PARISC_RESOURCE_H
-#define _ASM_PARISC_RESOURCE_H
-
-#define _STK_LIM_MAX   10 * _STK_LIM
-#include <asm-generic/resource.h>
-
-#endif
index a28f02165e97032c8eda569e97b06a4dc81fb0f9..d367a0aece2aac8b067a6c7bf51c43ef3d488eda 100644 (file)
@@ -139,18 +139,18 @@ static struct addr_range prep_initrd(struct addr_range vmlinux, void *chosen,
  * edit the command line passed to vmlinux (by setting /chosen/bootargs).
  * The buffer is put in it's own section so that tools may locate it easier.
  */
-static char cmdline[COMMAND_LINE_SIZE]
+static char cmdline[BOOT_COMMAND_LINE_SIZE]
        __attribute__((__section__("__builtin_cmdline")));
 
 static void prep_cmdline(void *chosen)
 {
        if (cmdline[0] == '\0')
-               getprop(chosen, "bootargs", cmdline, COMMAND_LINE_SIZE-1);
+               getprop(chosen, "bootargs", cmdline, BOOT_COMMAND_LINE_SIZE-1);
 
        printf("\n\rLinux/PowerPC load: %s", cmdline);
        /* If possible, edit the command line */
        if (console_ops.edit_cmdline)
-               console_ops.edit_cmdline(cmdline, COMMAND_LINE_SIZE);
+               console_ops.edit_cmdline(cmdline, BOOT_COMMAND_LINE_SIZE);
        printf("\n\r");
 
        /* Put the command line back into the devtree for the kernel */
@@ -174,7 +174,7 @@ void start(void)
         * built-in command line wasn't set by an external tool */
        if ((loader_info.cmdline_len > 0) && (cmdline[0] == '\0'))
                memmove(cmdline, loader_info.cmdline,
-                       min(loader_info.cmdline_len, COMMAND_LINE_SIZE-1));
+                       min(loader_info.cmdline_len, BOOT_COMMAND_LINE_SIZE-1));
 
        if (console_ops.open && (console_ops.open() < 0))
                exit();
index b3218ce451bb9be8081dd77cc75f0a3114468be1..8aad3c55aeda2885e331b8c7ebd52436c219e74b 100644 (file)
@@ -15,7 +15,7 @@
 #include "types.h"
 #include "string.h"
 
-#define        COMMAND_LINE_SIZE       512
+#define        BOOT_COMMAND_LINE_SIZE  2048
 #define        MAX_PATH_LEN            256
 #define        MAX_PROP_LEN            256 /* What should this be? */
 
index 9954d98871d061dfc9abb8c33fefcbbe00d8d2ea..4ec2d86d3c50571a2a62f27c31f00739595ed219 100644 (file)
@@ -47,13 +47,13 @@ BSS_STACK(4096);
  * The buffer is put in it's own section so that tools may locate it easier.
  */
 
-static char cmdline[COMMAND_LINE_SIZE]
+static char cmdline[BOOT_COMMAND_LINE_SIZE]
        __attribute__((__section__("__builtin_cmdline")));
 
 static void prep_cmdline(void *chosen)
 {
        if (cmdline[0] == '\0')
-               getprop(chosen, "bootargs", cmdline, COMMAND_LINE_SIZE-1);
+               getprop(chosen, "bootargs", cmdline, BOOT_COMMAND_LINE_SIZE-1);
        else
                setprop_str(chosen, "bootargs", cmdline);
 
index a2efdaa020b0f30b11a08b1352b1f9d7388ad3af..66ad7a74116f15dd803ef7e887b7988ac5efa61a 100644 (file)
@@ -41,14 +41,14 @@ struct opal_takeover_args {
  * size except the last one in the list to be as well.
  */
 struct opal_sg_entry {
-       void    *data;
-       long    length;
+       __be64 data;
+       __be64 length;
 };
 
-/* sg list */
+/* SG list */
 struct opal_sg_list {
-       unsigned long num_entries;
-       struct opal_sg_list *next;
+       __be64 length;
+       __be64 next;
        struct opal_sg_entry entry[];
 };
 
@@ -858,8 +858,8 @@ int64_t opal_lpc_write(uint32_t chip_id, enum OpalLPCAddressType addr_type,
 int64_t opal_lpc_read(uint32_t chip_id, enum OpalLPCAddressType addr_type,
                      uint32_t addr, __be32 *data, uint32_t sz);
 
-int64_t opal_read_elog(uint64_t buffer, size_t size, uint64_t log_id);
-int64_t opal_get_elog_size(uint64_t *log_id, size_t *size, uint64_t *elog_type);
+int64_t opal_read_elog(uint64_t buffer, uint64_t size, uint64_t log_id);
+int64_t opal_get_elog_size(__be64 *log_id, __be64 *size, __be64 *elog_type);
 int64_t opal_write_elog(uint64_t buffer, uint64_t size, uint64_t offset);
 int64_t opal_send_ack_elog(uint64_t log_id);
 void opal_resend_pending_logs(void);
@@ -868,23 +868,24 @@ int64_t opal_validate_flash(uint64_t buffer, uint32_t *size, uint32_t *result);
 int64_t opal_manage_flash(uint8_t op);
 int64_t opal_update_flash(uint64_t blk_list);
 int64_t opal_dump_init(uint8_t dump_type);
-int64_t opal_dump_info(uint32_t *dump_id, uint32_t *dump_size);
-int64_t opal_dump_info2(uint32_t *dump_id, uint32_t *dump_size, uint32_t *dump_type);
+int64_t opal_dump_info(__be32 *dump_id, __be32 *dump_size);
+int64_t opal_dump_info2(__be32 *dump_id, __be32 *dump_size, __be32 *dump_type);
 int64_t opal_dump_read(uint32_t dump_id, uint64_t buffer);
 int64_t opal_dump_ack(uint32_t dump_id);
 int64_t opal_dump_resend_notification(void);
 
-int64_t opal_get_msg(uint64_t buffer, size_t size);
-int64_t opal_check_completion(uint64_t buffer, size_t size, uint64_t token);
+int64_t opal_get_msg(uint64_t buffer, uint64_t size);
+int64_t opal_check_completion(uint64_t buffer, uint64_t size, uint64_t token);
 int64_t opal_sync_host_reboot(void);
 int64_t opal_get_param(uint64_t token, uint32_t param_id, uint64_t buffer,
-               size_t length);
+               uint64_t length);
 int64_t opal_set_param(uint64_t token, uint32_t param_id, uint64_t buffer,
-               size_t length);
+               uint64_t length);
 int64_t opal_sensor_read(uint32_t sensor_hndl, int token, __be32 *sensor_data);
 
 /* Internal functions */
-extern int early_init_dt_scan_opal(unsigned long node, const char *uname, int depth, void *data);
+extern int early_init_dt_scan_opal(unsigned long node, const char *uname,
+                                  int depth, void *data);
 extern int early_init_dt_scan_recoverable_ranges(unsigned long node,
                                 const char *uname, int depth, void *data);
 
@@ -893,10 +894,6 @@ extern int opal_put_chars(uint32_t vtermno, const char *buf, int total_len);
 
 extern void hvc_opal_init_early(void);
 
-/* Internal functions */
-extern int early_init_dt_scan_opal(unsigned long node, const char *uname,
-                                  int depth, void *data);
-
 extern int opal_notifier_register(struct notifier_block *nb);
 extern int opal_notifier_unregister(struct notifier_block *nb);
 
@@ -906,9 +903,6 @@ extern void opal_notifier_enable(void);
 extern void opal_notifier_disable(void);
 extern void opal_notifier_update_evt(uint64_t evt_mask, uint64_t evt_val);
 
-extern int opal_get_chars(uint32_t vtermno, char *buf, int count);
-extern int opal_put_chars(uint32_t vtermno, const char *buf, int total_len);
-
 extern int __opal_async_get_token(void);
 extern int opal_async_get_token_interruptible(void);
 extern int __opal_async_release_token(int token);
@@ -916,8 +910,6 @@ extern int opal_async_release_token(int token);
 extern int opal_async_wait_response(uint64_t token, struct opal_msg *msg);
 extern int opal_get_sensor_data(u32 sensor_hndl, u32 *sensor_data);
 
-extern void hvc_opal_init_early(void);
-
 struct rtc_time;
 extern int opal_set_rtc_time(struct rtc_time *tm);
 extern void opal_get_rtc_time(struct rtc_time *tm);
@@ -937,6 +929,10 @@ extern int opal_resync_timebase(void);
 
 extern void opal_lpc_init(void);
 
+struct opal_sg_list *opal_vmalloc_to_sg_list(void *vmalloc_addr,
+                                            unsigned long vmalloc_size);
+void opal_free_sg_list(struct opal_sg_list *sg);
+
 #endif /* __ASSEMBLY__ */
 
 #endif /* __OPAL_H */
index 552df83f1a49627ddd1d49e1ba982c743a5bfefe..ae3fb68cb28e8df5cb53f078ccc5bcc923c8231c 100644 (file)
@@ -1 +1,6 @@
-#include <asm-generic/setup.h>
+#ifndef _UAPI_ASM_POWERPC_SETUP_H
+#define _UAPI_ASM_POWERPC_SETUP_H
+
+#define COMMAND_LINE_SIZE      2048
+
+#endif /* _UAPI_ASM_POWERPC_SETUP_H */
index 3bd77edd7610ce20267a880069972624eafed62e..450850a49dced7919c3c2d349c2d70aae7cea0ad 100644 (file)
@@ -120,6 +120,7 @@ EXPORT_SYMBOL(giveup_spe);
 EXPORT_SYMBOL(flush_instruction_cache);
 #endif
 EXPORT_SYMBOL(flush_dcache_range);
+EXPORT_SYMBOL(flush_icache_range);
 
 #ifdef CONFIG_SMP
 #ifdef CONFIG_PPC32
index 2f3cdb01506de3d7791712ecd6ffeaf1fcd36352..658e89d2025b0b2dd65bb812d2c89d65867c1015 100644 (file)
@@ -705,7 +705,7 @@ static int __init rtas_flash_init(void)
        if (rtas_token("ibm,update-flash-64-and-reboot") ==
                       RTAS_UNKNOWN_SERVICE) {
                pr_info("rtas_flash: no firmware flash support\n");
-               return 1;
+               return -EINVAL;
        }
 
        rtas_validate_flash_data.buf = kzalloc(VALIDATE_BUF_SIZE, GFP_KERNEL);
index ffbb871c2bd803827fa5a78658f29d2fa8a1dbd6..b031f932c0cc3dcc0c452c88f8ba2c3c88cf641d 100644 (file)
@@ -242,6 +242,12 @@ kvm_novcpu_exit:
  */
        .globl  kvm_start_guest
 kvm_start_guest:
+
+       /* Set runlatch bit the minute you wake up from nap */
+       mfspr   r1, SPRN_CTRLF
+       ori     r1, r1, 1
+       mtspr   SPRN_CTRLT, r1
+
        ld      r2,PACATOC(r13)
 
        li      r0,KVM_HWTHREAD_IN_KVM
@@ -309,6 +315,11 @@ kvm_no_guest:
        li      r0, KVM_HWTHREAD_IN_NAP
        stb     r0, HSTATE_HWTHREAD_STATE(r13)
 kvm_do_nap:
+       /* Clear the runlatch bit before napping */
+       mfspr   r2, SPRN_CTRLF
+       clrrdi  r2, r2, 1
+       mtspr   SPRN_CTRLT, r2
+
        li      r3, LPCR_PECE0
        mfspr   r4, SPRN_LPCR
        rlwimi  r4, r3, 0, LPCR_PECE0 | LPCR_PECE1
@@ -1999,8 +2010,13 @@ END_FTR_SECTION_IFCLR(CPU_FTR_ARCH_206)
 
        /*
         * Take a nap until a decrementer or external or doobell interrupt
-        * occurs, with PECE1, PECE0 and PECEDP set in LPCR
+        * occurs, with PECE1, PECE0 and PECEDP set in LPCR. Also clear the
+        * runlatch bit before napping.
         */
+       mfspr   r2, SPRN_CTRLF
+       clrrdi  r2, r2, 1
+       mtspr   SPRN_CTRLT, r2
+
        li      r0,1
        stb     r0,HSTATE_HWTHREAD_REQ(r13)
        mfspr   r5,SPRN_LPCR
index 3ea26c25590be1dabe4a057882f35b77f5dfe7c1..cf1d325eae8be814953650cf6b94fd349c0fdd12 100644 (file)
@@ -82,17 +82,14 @@ static inline void __tlbie(unsigned long vpn, int psize, int apsize, int ssize)
                va &= ~((1ul << mmu_psize_defs[apsize].shift) - 1);
                va |= penc << 12;
                va |= ssize << 8;
-               /* Add AVAL part */
-               if (psize != apsize) {
-                       /*
-                        * MPSS, 64K base page size and 16MB parge page size
-                        * We don't need all the bits, but rest of the bits
-                        * must be ignored by the processor.
-                        * vpn cover upto 65 bits of va. (0...65) and we need
-                        * 58..64 bits of va.
-                        */
-                       va |= (vpn & 0xfe);
-               }
+               /*
+                * AVAL bits:
+                * We don't need all the bits, but rest of the bits
+                * must be ignored by the processor.
+                * vpn cover upto 65 bits of va. (0...65) and we need
+                * 58..64 bits of va.
+                */
+               va |= (vpn & 0xfe); /* AVAL */
                va |= 1; /* L */
                asm volatile(ASM_FTR_IFCLR("tlbie %0,1", PPC_TLBIE(%1,%0), %2)
                             : : "r" (va), "r"(0), "i" (CPU_FTR_ARCH_206)
@@ -133,17 +130,14 @@ static inline void __tlbiel(unsigned long vpn, int psize, int apsize, int ssize)
                va &= ~((1ul << mmu_psize_defs[apsize].shift) - 1);
                va |= penc << 12;
                va |= ssize << 8;
-               /* Add AVAL part */
-               if (psize != apsize) {
-                       /*
-                        * MPSS, 64K base page size and 16MB parge page size
-                        * We don't need all the bits, but rest of the bits
-                        * must be ignored by the processor.
-                        * vpn cover upto 65 bits of va. (0...65) and we need
-                        * 58..64 bits of va.
-                        */
-                       va |= (vpn & 0xfe);
-               }
+               /*
+                * AVAL bits:
+                * We don't need all the bits, but rest of the bits
+                * must be ignored by the processor.
+                * vpn cover upto 65 bits of va. (0...65) and we need
+                * 58..64 bits of va.
+                */
+               va |= (vpn & 0xfe);
                va |= 1; /* L */
                asm volatile(".long 0x7c000224 | (%0 << 11) | (1 << 21)"
                             : : "r"(va) : "memory");
index 297c9105141365e81316a888176fe80611b5b08a..e0766b82e1656721ff9e93586b47552414936973 100644 (file)
@@ -155,16 +155,28 @@ static ssize_t read_offset_data(void *dest, size_t dest_len,
        return copy_len;
 }
 
-static unsigned long h_get_24x7_catalog_page(char page[static 4096],
-                                            u32 version, u32 index)
+static unsigned long h_get_24x7_catalog_page_(unsigned long phys_4096,
+                                             unsigned long version,
+                                             unsigned long index)
 {
-       WARN_ON(!IS_ALIGNED((unsigned long)page, 4096));
+       pr_devel("h_get_24x7_catalog_page(0x%lx, %lu, %lu)",
+                       phys_4096,
+                       version,
+                       index);
+       WARN_ON(!IS_ALIGNED(phys_4096, 4096));
        return plpar_hcall_norets(H_GET_24X7_CATALOG_PAGE,
-                       virt_to_phys(page),
+                       phys_4096,
                        version,
                        index);
 }
 
+static unsigned long h_get_24x7_catalog_page(char page[],
+                                            u64 version, u32 index)
+{
+       return h_get_24x7_catalog_page_(virt_to_phys(page),
+                                       version, index);
+}
+
 static ssize_t catalog_read(struct file *filp, struct kobject *kobj,
                            struct bin_attribute *bin_attr, char *buf,
                            loff_t offset, size_t count)
@@ -173,7 +185,7 @@ static ssize_t catalog_read(struct file *filp, struct kobject *kobj,
        ssize_t ret = 0;
        size_t catalog_len = 0, catalog_page_len = 0, page_count = 0;
        loff_t page_offset = 0;
-       uint32_t catalog_version_num = 0;
+       uint64_t catalog_version_num = 0;
        void *page = kmem_cache_alloc(hv_page_cache, GFP_USER);
        struct hv_24x7_catalog_page_0 *page_0 = page;
        if (!page)
@@ -185,7 +197,7 @@ static ssize_t catalog_read(struct file *filp, struct kobject *kobj,
                goto e_free;
        }
 
-       catalog_version_num = be32_to_cpu(page_0->version);
+       catalog_version_num = be64_to_cpu(page_0->version);
        catalog_page_len = be32_to_cpu(page_0->length);
        catalog_len = catalog_page_len * 4096;
 
@@ -208,8 +220,9 @@ static ssize_t catalog_read(struct file *filp, struct kobject *kobj,
                                page, 4096, page_offset * 4096);
 e_free:
        if (hret)
-               pr_err("h_get_24x7_catalog_page(ver=%d, page=%lld) failed: rc=%ld\n",
-                               catalog_version_num, page_offset, hret);
+               pr_err("h_get_24x7_catalog_page(ver=%lld, page=%lld) failed:"
+                      " rc=%ld\n",
+                      catalog_version_num, page_offset, hret);
        kfree(page);
 
        pr_devel("catalog_read: offset=%lld(%lld) count=%zu(%zu) catalog_len=%zu(%zu) => %zd\n",
@@ -243,7 +256,7 @@ e_free:                                                             \
 static DEVICE_ATTR_RO(_name)
 
 PAGE_0_ATTR(catalog_version, "%lld\n",
-               (unsigned long long)be32_to_cpu(page_0->version));
+               (unsigned long long)be64_to_cpu(page_0->version));
 PAGE_0_ATTR(catalog_len, "%lld\n",
                (unsigned long long)be32_to_cpu(page_0->length) * 4096);
 static BIN_ATTR_RO(catalog, 0/* real length varies */);
@@ -485,13 +498,13 @@ static int hv_24x7_init(void)
        struct hv_perf_caps caps;
 
        if (!firmware_has_feature(FW_FEATURE_LPAR)) {
-               pr_info("not a virtualized system, not enabling\n");
+               pr_debug("not a virtualized system, not enabling\n");
                return -ENODEV;
        }
 
        hret = hv_perf_caps_get(&caps);
        if (hret) {
-               pr_info("could not obtain capabilities, error 0x%80lx, not enabling\n",
+               pr_debug("could not obtain capabilities, not enabling, rc=%ld\n",
                                hret);
                return -ENODEV;
        }
index 278ba7b9c2b525287f930445e71e04bd538e3236..c9d399a2df82e6727fa78b4de69fb867cc85552f 100644 (file)
@@ -78,7 +78,7 @@ static ssize_t kernel_version_show(struct device *dev,
        return sprintf(page, "0x%x\n", COUNTER_INFO_VERSION_CURRENT);
 }
 
-DEVICE_ATTR_RO(kernel_version);
+static DEVICE_ATTR_RO(kernel_version);
 HV_CAPS_ATTR(version, "0x%x\n");
 HV_CAPS_ATTR(ga, "%d\n");
 HV_CAPS_ATTR(expanded, "%d\n");
@@ -273,13 +273,13 @@ static int hv_gpci_init(void)
        struct hv_perf_caps caps;
 
        if (!firmware_has_feature(FW_FEATURE_LPAR)) {
-               pr_info("not a virtualized system, not enabling\n");
+               pr_debug("not a virtualized system, not enabling\n");
                return -ENODEV;
        }
 
        hret = hv_perf_caps_get(&caps);
        if (hret) {
-               pr_info("could not obtain capabilities, error 0x%80lx, not enabling\n",
+               pr_debug("could not obtain capabilities, not enabling, rc=%ld\n",
                                hret);
                return -ENODEV;
        }
index b9827b0d87e4cd69cdff9f51216ea9429c79fb41..788a1977b9a5203cc9a477be6f9a2a7b71cdd754 100644 (file)
@@ -209,89 +209,20 @@ static struct kobj_type dump_ktype = {
        .default_attrs = dump_default_attrs,
 };
 
-static void free_dump_sg_list(struct opal_sg_list *list)
-{
-       struct opal_sg_list *sg1;
-       while (list) {
-               sg1 = list->next;
-               kfree(list);
-               list = sg1;
-       }
-       list = NULL;
-}
-
-static struct opal_sg_list *dump_data_to_sglist(struct dump_obj *dump)
-{
-       struct opal_sg_list *sg1, *list = NULL;
-       void *addr;
-       int64_t size;
-
-       addr = dump->buffer;
-       size = dump->size;
-
-       sg1 = kzalloc(PAGE_SIZE, GFP_KERNEL);
-       if (!sg1)
-               goto nomem;
-
-       list = sg1;
-       sg1->num_entries = 0;
-       while (size > 0) {
-               /* Translate virtual address to physical address */
-               sg1->entry[sg1->num_entries].data =
-                       (void *)(vmalloc_to_pfn(addr) << PAGE_SHIFT);
-
-               if (size > PAGE_SIZE)
-                       sg1->entry[sg1->num_entries].length = PAGE_SIZE;
-               else
-                       sg1->entry[sg1->num_entries].length = size;
-
-               sg1->num_entries++;
-               if (sg1->num_entries >= SG_ENTRIES_PER_NODE) {
-                       sg1->next = kzalloc(PAGE_SIZE, GFP_KERNEL);
-                       if (!sg1->next)
-                               goto nomem;
-
-                       sg1 = sg1->next;
-                       sg1->num_entries = 0;
-               }
-               addr += PAGE_SIZE;
-               size -= PAGE_SIZE;
-       }
-       return list;
-
-nomem:
-       pr_err("%s : Failed to allocate memory\n", __func__);
-       free_dump_sg_list(list);
-       return NULL;
-}
-
-static void sglist_to_phy_addr(struct opal_sg_list *list)
-{
-       struct opal_sg_list *sg, *next;
-
-       for (sg = list; sg; sg = next) {
-               next = sg->next;
-               /* Don't translate NULL pointer for last entry */
-               if (sg->next)
-                       sg->next = (struct opal_sg_list *)__pa(sg->next);
-               else
-                       sg->next = NULL;
-
-               /* Convert num_entries to length */
-               sg->num_entries =
-                       sg->num_entries * sizeof(struct opal_sg_entry) + 16;
-       }
-}
-
-static int64_t dump_read_info(uint32_t *id, uint32_t *size, uint32_t *type)
+static int64_t dump_read_info(uint32_t *dump_id, uint32_t *dump_size, uint32_t *dump_type)
 {
+       __be32 id, size, type;
        int rc;
-       *type = 0xffffffff;
 
-       rc = opal_dump_info2(id, size, type);
+       type = cpu_to_be32(0xffffffff);
 
+       rc = opal_dump_info2(&id, &size, &type);
        if (rc == OPAL_PARAMETER)
-               rc = opal_dump_info(id, size);
+               rc = opal_dump_info(&id, &size);
+
+       *dump_id = be32_to_cpu(id);
+       *dump_size = be32_to_cpu(size);
+       *dump_type = be32_to_cpu(type);
 
        if (rc)
                pr_warn("%s: Failed to get dump info (%d)\n",
@@ -314,15 +245,12 @@ static int64_t dump_read_data(struct dump_obj *dump)
        }
 
        /* Generate SG list */
-       list = dump_data_to_sglist(dump);
+       list = opal_vmalloc_to_sg_list(dump->buffer, dump->size);
        if (!list) {
                rc = -ENOMEM;
                goto out;
        }
 
-       /* Translate sg list addr to real address */
-       sglist_to_phy_addr(list);
-
        /* First entry address */
        addr = __pa(list);
 
@@ -341,7 +269,7 @@ static int64_t dump_read_data(struct dump_obj *dump)
                        __func__, dump->id);
 
        /* Free SG list */
-       free_dump_sg_list(list);
+       opal_free_sg_list(list);
 
 out:
        return rc;
index ef7bc2a978627422d659d783ea21f0869975df53..10268c41d8302dd39ed73f4e0ad98dd6deb5e25f 100644 (file)
@@ -238,18 +238,25 @@ static struct elog_obj *create_elog_obj(uint64_t id, size_t size, uint64_t type)
 
 static void elog_work_fn(struct work_struct *work)
 {
-       size_t elog_size;
+       __be64 size;
+       __be64 id;
+       __be64 type;
+       uint64_t elog_size;
        uint64_t log_id;
        uint64_t elog_type;
        int rc;
        char name[2+16+1];
 
-       rc = opal_get_elog_size(&log_id, &elog_size, &elog_type);
+       rc = opal_get_elog_size(&id, &size, &type);
        if (rc != OPAL_SUCCESS) {
                pr_err("ELOG: Opal log read failed\n");
                return;
        }
 
+       elog_size = be64_to_cpu(size);
+       log_id = be64_to_cpu(id);
+       elog_type = be64_to_cpu(type);
+
        BUG_ON(elog_size > OPAL_MAX_ERRLOG_SIZE);
 
        if (elog_size >= OPAL_MAX_ERRLOG_SIZE)
index 714ef972406bcacf66a4a896283c6fb25963ca16..dc487ff0470401b0613b28958ff102dbcc5103e1 100644 (file)
@@ -79,9 +79,6 @@
 /* XXX: Assume candidate image size is <= 1GB */
 #define MAX_IMAGE_SIZE 0x40000000
 
-/* Flash sg list version */
-#define SG_LIST_VERSION (1UL)
-
 /* Image status */
 enum {
        IMAGE_INVALID,
@@ -131,11 +128,15 @@ static DEFINE_MUTEX(image_data_mutex);
  */
 static inline void opal_flash_validate(void)
 {
-       struct validate_flash_t *args_buf = &validate_flash_data;
+       long ret;
+       void *buf = validate_flash_data.buf;
+       __be32 size, result;
 
-       args_buf->status = opal_validate_flash(__pa(args_buf->buf),
-                                              &(args_buf->buf_size),
-                                              &(args_buf->result));
+       ret = opal_validate_flash(__pa(buf), &size, &result);
+
+       validate_flash_data.status = ret;
+       validate_flash_data.buf_size = be32_to_cpu(size);
+       validate_flash_data.result = be32_to_cpu(result);
 }
 
 /*
@@ -267,94 +268,12 @@ static ssize_t manage_store(struct kobject *kobj,
        return count;
 }
 
-/*
- * Free sg list
- */
-static void free_sg_list(struct opal_sg_list *list)
-{
-       struct opal_sg_list *sg1;
-       while (list) {
-               sg1 = list->next;
-               kfree(list);
-               list = sg1;
-       }
-       list = NULL;
-}
-
-/*
- * Build candidate image scatter gather list
- *
- * list format:
- *   -----------------------------------
- *  |  VER (8) | Entry length in bytes  |
- *   -----------------------------------
- *  |  Pointer to next entry            |
- *   -----------------------------------
- *  |  Address of memory area 1         |
- *   -----------------------------------
- *  |  Length of memory area 1          |
- *   -----------------------------------
- *  |   .........                       |
- *   -----------------------------------
- *  |   .........                       |
- *   -----------------------------------
- *  |  Address of memory area N         |
- *   -----------------------------------
- *  |  Length of memory area N          |
- *   -----------------------------------
- */
-static struct opal_sg_list *image_data_to_sglist(void)
-{
-       struct opal_sg_list *sg1, *list = NULL;
-       void *addr;
-       int size;
-
-       addr = image_data.data;
-       size = image_data.size;
-
-       sg1 = kzalloc(PAGE_SIZE, GFP_KERNEL);
-       if (!sg1)
-               return NULL;
-
-       list = sg1;
-       sg1->num_entries = 0;
-       while (size > 0) {
-               /* Translate virtual address to physical address */
-               sg1->entry[sg1->num_entries].data =
-                       (void *)(vmalloc_to_pfn(addr) << PAGE_SHIFT);
-
-               if (size > PAGE_SIZE)
-                       sg1->entry[sg1->num_entries].length = PAGE_SIZE;
-               else
-                       sg1->entry[sg1->num_entries].length = size;
-
-               sg1->num_entries++;
-               if (sg1->num_entries >= SG_ENTRIES_PER_NODE) {
-                       sg1->next = kzalloc(PAGE_SIZE, GFP_KERNEL);
-                       if (!sg1->next) {
-                               pr_err("%s : Failed to allocate memory\n",
-                                      __func__);
-                               goto nomem;
-                       }
-
-                       sg1 = sg1->next;
-                       sg1->num_entries = 0;
-               }
-               addr += PAGE_SIZE;
-               size -= PAGE_SIZE;
-       }
-       return list;
-nomem:
-       free_sg_list(list);
-       return NULL;
-}
-
 /*
  * OPAL update flash
  */
 static int opal_flash_update(int op)
 {
-       struct opal_sg_list *sg, *list, *next;
+       struct opal_sg_list *list;
        unsigned long addr;
        int64_t rc = OPAL_PARAMETER;
 
@@ -364,30 +283,13 @@ static int opal_flash_update(int op)
                goto flash;
        }
 
-       list = image_data_to_sglist();
+       list = opal_vmalloc_to_sg_list(image_data.data, image_data.size);
        if (!list)
                goto invalid_img;
 
        /* First entry address */
        addr = __pa(list);
 
-       /* Translate sg list address to absolute */
-       for (sg = list; sg; sg = next) {
-               next = sg->next;
-               /* Don't translate NULL pointer for last entry */
-               if (sg->next)
-                       sg->next = (struct opal_sg_list *)__pa(sg->next);
-               else
-                       sg->next = NULL;
-
-               /*
-                * Convert num_entries to version/length format
-                * to satisfy OPAL.
-                */
-               sg->num_entries = (SG_LIST_VERSION << 56) |
-                       (sg->num_entries * sizeof(struct opal_sg_entry) + 16);
-       }
-
        pr_alert("FLASH: Image is %u bytes\n", image_data.size);
        pr_alert("FLASH: Image update requested\n");
        pr_alert("FLASH: Image will be updated during system reboot\n");
index 6b614726baf2add5f95237647f128c5c7119e173..d202f9bc3683f5ad0072282173ddfb510b1aec9a 100644 (file)
@@ -39,10 +39,11 @@ struct param_attr {
        struct kobj_attribute kobj_attr;
 };
 
-static int opal_get_sys_param(u32 param_id, u32 length, void *buffer)
+static ssize_t opal_get_sys_param(u32 param_id, u32 length, void *buffer)
 {
        struct opal_msg msg;
-       int ret, token;
+       ssize_t ret;
+       int token;
 
        token = opal_async_get_token_interruptible();
        if (token < 0) {
@@ -59,7 +60,7 @@ static int opal_get_sys_param(u32 param_id, u32 length, void *buffer)
 
        ret = opal_async_wait_response(token, &msg);
        if (ret) {
-               pr_err("%s: Failed to wait for the async response, %d\n",
+               pr_err("%s: Failed to wait for the async response, %zd\n",
                                __func__, ret);
                goto out_token;
        }
@@ -111,7 +112,7 @@ static ssize_t sys_param_show(struct kobject *kobj,
 {
        struct param_attr *attr = container_of(kobj_attr, struct param_attr,
                        kobj_attr);
-       int ret;
+       ssize_t ret;
 
        mutex_lock(&opal_sysparam_mutex);
        ret = opal_get_sys_param(attr->param_id, attr->param_size,
@@ -121,9 +122,10 @@ static ssize_t sys_param_show(struct kobject *kobj,
 
        memcpy(buf, param_data_buf, attr->param_size);
 
+       ret = attr->param_size;
 out:
        mutex_unlock(&opal_sysparam_mutex);
-       return ret ? ret : attr->param_size;
+       return ret;
 }
 
 static ssize_t sys_param_store(struct kobject *kobj,
@@ -131,14 +133,20 @@ static ssize_t sys_param_store(struct kobject *kobj,
 {
        struct param_attr *attr = container_of(kobj_attr, struct param_attr,
                        kobj_attr);
-       int ret;
+       ssize_t ret;
+
+        /* MAX_PARAM_DATA_LEN is sizeof(param_data_buf) */
+        if (count > MAX_PARAM_DATA_LEN)
+                count = MAX_PARAM_DATA_LEN;
 
        mutex_lock(&opal_sysparam_mutex);
        memcpy(param_data_buf, buf, count);
        ret = opal_set_sys_param(attr->param_id, attr->param_size,
                        param_data_buf);
        mutex_unlock(&opal_sysparam_mutex);
-       return ret ? ret : count;
+       if (!ret)
+               ret = count;
+       return ret;
 }
 
 void __init opal_sys_param_init(void)
@@ -214,13 +222,13 @@ void __init opal_sys_param_init(void)
        }
 
        if (of_property_read_u32_array(sysparam, "param-len", size, count)) {
-               pr_err("SYSPARAM: Missing propery param-len in the DT\n");
+               pr_err("SYSPARAM: Missing property param-len in the DT\n");
                goto out_free_perm;
        }
 
 
        if (of_property_read_u8_array(sysparam, "param-perm", perm, count)) {
-               pr_err("SYSPARAM: Missing propery param-perm in the DT\n");
+               pr_err("SYSPARAM: Missing property param-perm in the DT\n");
                goto out_free_perm;
        }
 
@@ -233,6 +241,12 @@ void __init opal_sys_param_init(void)
 
        /* For each of the parameters, populate the parameter attributes */
        for (i = 0; i < count; i++) {
+               if (size[i] > MAX_PARAM_DATA_LEN) {
+                       pr_warn("SYSPARAM: Not creating parameter %d as size "
+                               "exceeds buffer length\n", i);
+                       continue;
+               }
+
                sysfs_attr_init(&attr[i].kobj_attr.attr);
                attr[i].param_id = id[i];
                attr[i].param_size = size[i];
index 49d2f00019e5d8092f7f32e9b3c6dfc53cceeee6..360ad80c754ce3c97ad9b9fead5ad4806e5f6666 100644 (file)
@@ -242,14 +242,14 @@ void opal_notifier_update_evt(uint64_t evt_mask,
 void opal_notifier_enable(void)
 {
        int64_t rc;
-       uint64_t evt = 0;
+       __be64 evt = 0;
 
        atomic_set(&opal_notifier_hold, 0);
 
        /* Process pending events */
        rc = opal_poll_events(&evt);
        if (rc == OPAL_SUCCESS && evt)
-               opal_do_notifier(evt);
+               opal_do_notifier(be64_to_cpu(evt));
 }
 
 void opal_notifier_disable(void)
@@ -529,7 +529,7 @@ static irqreturn_t opal_interrupt(int irq, void *data)
 
        opal_handle_interrupt(virq_to_hw(irq), &events);
 
-       opal_do_notifier(events);
+       opal_do_notifier(be64_to_cpu(events));
 
        return IRQ_HANDLED;
 }
@@ -638,3 +638,66 @@ void opal_shutdown(void)
 
 /* Export this so that test modules can use it */
 EXPORT_SYMBOL_GPL(opal_invalid_call);
+
+/* Convert a region of vmalloc memory to an opal sg list */
+struct opal_sg_list *opal_vmalloc_to_sg_list(void *vmalloc_addr,
+                                            unsigned long vmalloc_size)
+{
+       struct opal_sg_list *sg, *first = NULL;
+       unsigned long i = 0;
+
+       sg = kzalloc(PAGE_SIZE, GFP_KERNEL);
+       if (!sg)
+               goto nomem;
+
+       first = sg;
+
+       while (vmalloc_size > 0) {
+               uint64_t data = vmalloc_to_pfn(vmalloc_addr) << PAGE_SHIFT;
+               uint64_t length = min(vmalloc_size, PAGE_SIZE);
+
+               sg->entry[i].data = cpu_to_be64(data);
+               sg->entry[i].length = cpu_to_be64(length);
+               i++;
+
+               if (i >= SG_ENTRIES_PER_NODE) {
+                       struct opal_sg_list *next;
+
+                       next = kzalloc(PAGE_SIZE, GFP_KERNEL);
+                       if (!next)
+                               goto nomem;
+
+                       sg->length = cpu_to_be64(
+                                       i * sizeof(struct opal_sg_entry) + 16);
+                       i = 0;
+                       sg->next = cpu_to_be64(__pa(next));
+                       sg = next;
+               }
+
+               vmalloc_addr += length;
+               vmalloc_size -= length;
+       }
+
+       sg->length = cpu_to_be64(i * sizeof(struct opal_sg_entry) + 16);
+
+       return first;
+
+nomem:
+       pr_err("%s : Failed to allocate memory\n", __func__);
+       opal_free_sg_list(first);
+       return NULL;
+}
+
+void opal_free_sg_list(struct opal_sg_list *sg)
+{
+       while (sg) {
+               uint64_t next = be64_to_cpu(sg->next);
+
+               kfree(sg);
+
+               if (next)
+                       sg = __va(next);
+               else
+                       sg = NULL;
+       }
+}
index 3b2b4fb3585b6b9fac45878041772285591d1d63..98824aa991731882cca87f806f464198f356ff26 100644 (file)
@@ -343,7 +343,6 @@ static void pnv_ioda_setup_same_PE(struct pci_bus *bus, struct pnv_ioda_pe *pe)
                                pci_name(dev));
                        continue;
                }
-               pci_dev_get(dev);
                pdn->pcidev = dev;
                pdn->pe_number = pe->pe_number;
                pe->dma_weight += pnv_ioda_dma_weight(dev);
@@ -462,7 +461,7 @@ static void pnv_pci_ioda_dma_dev_setup(struct pnv_phb *phb, struct pci_dev *pdev
 
        pe = &phb->ioda.pe_array[pdn->pe_number];
        WARN_ON(get_dma_ops(&pdev->dev) != &dma_iommu_ops);
-       set_iommu_table_base_and_group(&pdev->dev, &pe->tce32_table);
+       set_iommu_table_base(&pdev->dev, &pe->tce32_table);
 }
 
 static int pnv_pci_ioda_dma_set_mask(struct pnv_phb *phb,
index 61cf8fa9c61b50489009b94c99c993fffaddde28..8723d32632f55b6eb49b25d55711140f60a95fe7 100644 (file)
@@ -162,18 +162,62 @@ static void pnv_shutdown(void)
 }
 
 #ifdef CONFIG_KEXEC
+static void pnv_kexec_wait_secondaries_down(void)
+{
+       int my_cpu, i, notified = -1;
+
+       my_cpu = get_cpu();
+
+       for_each_online_cpu(i) {
+               uint8_t status;
+               int64_t rc;
+
+               if (i == my_cpu)
+                       continue;
+
+               for (;;) {
+                       rc = opal_query_cpu_status(get_hard_smp_processor_id(i),
+                                                  &status);
+                       if (rc != OPAL_SUCCESS || status != OPAL_THREAD_STARTED)
+                               break;
+                       barrier();
+                       if (i != notified) {
+                               printk(KERN_INFO "kexec: waiting for cpu %d "
+                                      "(physical %d) to enter OPAL\n",
+                                      i, paca[i].hw_cpu_id);
+                               notified = i;
+                       }
+               }
+       }
+}
+
 static void pnv_kexec_cpu_down(int crash_shutdown, int secondary)
 {
        xics_kexec_teardown_cpu(secondary);
 
-       /* Return secondary CPUs to firmware on OPAL v3 */
-       if (firmware_has_feature(FW_FEATURE_OPALv3) && secondary) {
+       /* On OPAL v3, we return all CPUs to firmware */
+
+       if (!firmware_has_feature(FW_FEATURE_OPALv3))
+               return;
+
+       if (secondary) {
+               /* Return secondary CPUs to firmware on OPAL v3 */
                mb();
                get_paca()->kexec_state = KEXEC_STATE_REAL_MODE;
                mb();
 
                /* Return the CPU to OPAL */
                opal_return_cpu();
+       } else if (crash_shutdown) {
+               /*
+                * On crash, we don't wait for secondaries to go
+                * down as they might be unreachable or hung, so
+                * instead we just wait a bit and move on.
+                */
+               mdelay(1);
+       } else {
+               /* Primary waits for the secondaries to have reached OPAL */
+               pnv_kexec_wait_secondaries_down();
        }
 }
 #endif /* CONFIG_KEXEC */
index 908672bdcea6b2c77d75763a05d10f476e2b2d75..bf5fcd452168c6056492115c0232b40f25a2e407 100644 (file)
@@ -30,6 +30,7 @@
 #include <asm/cputhreads.h>
 #include <asm/xics.h>
 #include <asm/opal.h>
+#include <asm/runlatch.h>
 
 #include "powernv.h"
 
@@ -156,7 +157,9 @@ static void pnv_smp_cpu_kill_self(void)
         */
        mtspr(SPRN_LPCR, mfspr(SPRN_LPCR) & ~(u64)LPCR_PECE1);
        while (!generic_check_cpu_restart(cpu)) {
+               ppc64_runlatch_off();
                power7_nap();
+               ppc64_runlatch_on();
                if (!generic_check_cpu_restart(cpu)) {
                        DBG("CPU%d Unexpected exit while offline !\n", cpu);
                        /* We may be getting an IPI, so we re-enable
index 9b8e05078a63e73a2993cc89890793353a45f9ea..20d62975856fb7fa5795a566629bbb69aa7a3139 100644 (file)
@@ -88,13 +88,14 @@ void set_default_offline_state(int cpu)
 
 static void rtas_stop_self(void)
 {
-       struct rtas_args args = {
-               .token = cpu_to_be32(rtas_stop_self_token),
+       static struct rtas_args args = {
                .nargs = 0,
                .nret = 1,
                .rets = &args.args[0],
        };
 
+       args.token = cpu_to_be32(rtas_stop_self_token);
+
        local_irq_disable();
 
        BUG_ON(rtas_stop_self_token == RTAS_UNKNOWN_SERVICE);
index 573b488fc48b8a9b79674806d27599993c96b362..7f75c94af822c40322d8a4a751e983ad0e2bcdae 100644 (file)
@@ -100,10 +100,10 @@ static int pseries_remove_memblock(unsigned long base, unsigned int memblock_siz
 
        start_pfn = base >> PAGE_SHIFT;
 
-       if (!pfn_valid(start_pfn)) {
-               memblock_remove(base, memblock_size);
-               return 0;
-       }
+       lock_device_hotplug();
+
+       if (!pfn_valid(start_pfn))
+               goto out;
 
        block_sz = memory_block_size_bytes();
        sections_per_block = block_sz / MIN_MEMORY_BLOCK_SIZE;
@@ -114,8 +114,10 @@ static int pseries_remove_memblock(unsigned long base, unsigned int memblock_siz
                base += MIN_MEMORY_BLOCK_SIZE;
        }
 
+out:
        /* Update memory regions for memory remove */
        memblock_remove(base, memblock_size);
+       unlock_device_hotplug();
        return 0;
 }
 
index 228cf91b91c14bc4e865e89371595363aa7858cc..ffd1169ebaab8387c6a15831f5c660b1f90fa243 100644 (file)
@@ -25,7 +25,6 @@
 #include <linux/of.h>
 #include <linux/of_platform.h>
 #include <linux/phy.h>
-#include <linux/phy_fixed.h>
 #include <linux/spi/spi.h>
 #include <linux/fsl_devices.h>
 #include <linux/fs_enet_pd.h>
@@ -178,37 +177,6 @@ u32 get_baudrate(void)
 EXPORT_SYMBOL(get_baudrate);
 #endif /* CONFIG_CPM2 */
 
-#ifdef CONFIG_FIXED_PHY
-static int __init of_add_fixed_phys(void)
-{
-       int ret;
-       struct device_node *np;
-       u32 *fixed_link;
-       struct fixed_phy_status status = {};
-
-       for_each_node_by_name(np, "ethernet") {
-               fixed_link  = (u32 *)of_get_property(np, "fixed-link", NULL);
-               if (!fixed_link)
-                       continue;
-
-               status.link = 1;
-               status.duplex = fixed_link[1];
-               status.speed = fixed_link[2];
-               status.pause = fixed_link[3];
-               status.asym_pause = fixed_link[4];
-
-               ret = fixed_phy_add(PHY_POLL, fixed_link[0], &status);
-               if (ret) {
-                       of_node_put(np);
-                       return ret;
-               }
-       }
-
-       return 0;
-}
-arch_initcall(of_add_fixed_phys);
-#endif /* CONFIG_FIXED_PHY */
-
 #if defined(CONFIG_FSL_SOC_BOOKE) || defined(CONFIG_PPC_86xx)
 static __be32 __iomem *rstcr;
 
index 64603a10b86313aace9d570494fcd86853357253..4914fd3f41eca710778ceaee742f29fee5137faf 100644 (file)
@@ -1058,7 +1058,7 @@ static int __init apm821xx_pciex_core_init(struct device_node *np)
        return 1;
 }
 
-static int apm821xx_pciex_init_port_hw(struct ppc4xx_pciex_port *port)
+static int __init apm821xx_pciex_init_port_hw(struct ppc4xx_pciex_port *port)
 {
        u32 val;
 
index 6e670f88d125d79fd2108575a6f6509e74985678..ebc2913f9ee0bd29f1a8594da675a279d38c8640 100644 (file)
@@ -22,8 +22,8 @@ struct ccwgroup_device {
 /* public: */
        unsigned int count;
        struct device   dev;
-       struct ccw_device *cdev[0];
        struct work_struct ungroup_work;
+       struct ccw_device *cdev[0];
 };
 
 /**
index c544b6f05d95e8e6fee2ef5b5f6f24d7acaf19d8..a25f09fbaf3634f4d71ebdf8fa84ca116a07fe4f 100644 (file)
@@ -59,12 +59,23 @@ static inline void tlb_gather_mmu(struct mmu_gather *tlb,
        tlb->batch = NULL;
 }
 
-static inline void tlb_flush_mmu(struct mmu_gather *tlb)
+static inline void tlb_flush_mmu_tlbonly(struct mmu_gather *tlb)
 {
        __tlb_flush_mm_lazy(tlb->mm);
+}
+
+static inline void tlb_flush_mmu_free(struct mmu_gather *tlb)
+{
        tlb_table_flush(tlb);
 }
 
+
+static inline void tlb_flush_mmu(struct mmu_gather *tlb)
+{
+       tlb_flush_mmu_tlbonly(tlb);
+       tlb_flush_mmu_free(tlb);
+}
+
 static inline void tlb_finish_mmu(struct mmu_gather *tlb,
                                  unsigned long start, unsigned long end)
 {
index 9c36dc398f9070afb4d6151d214623d1cbcf9f4a..452d3ebd9d0fba3b513a6978b096bd22b27c1b46 100644 (file)
@@ -276,7 +276,6 @@ static void bpf_jit_noleaks(struct bpf_jit *jit, struct sock_filter *filter)
        case BPF_S_LD_W_IND:
        case BPF_S_LD_H_IND:
        case BPF_S_LD_B_IND:
-       case BPF_S_LDX_B_MSH:
        case BPF_S_LD_IMM:
        case BPF_S_LD_MEM:
        case BPF_S_MISC_TXA:
index 362192ed12fef1789d2c23d21c654f19632006a0..62f80d2a9df9f35c22b761b58cc7d8d747a689f4 100644 (file)
@@ -86,6 +86,14 @@ tlb_end_vma(struct mmu_gather *tlb, struct vm_area_struct *vma)
        }
 }
 
+static inline void tlb_flush_mmu_tlbonly(struct mmu_gather *tlb)
+{
+}
+
+static inline void tlb_flush_mmu_free(struct mmu_gather *tlb)
+{
+}
+
 static inline void tlb_flush_mmu(struct mmu_gather *tlb)
 {
 }
index bdbda1453aa9f168339896d6880e3e2acd44182f..04471dc64847269e815a26752ef029cd9206c154 100644 (file)
@@ -238,4 +238,16 @@ static inline __sum16 ip_compute_csum(const void *buff, int len)
        return csum_fold(csum_partial(buff, len, 0));
 }
 
+#define HAVE_ARCH_CSUM_ADD
+static inline __wsum csum_add(__wsum csum, __wsum addend)
+{
+       __asm__ __volatile__(
+               "addcc   %0, %1, %0\n"
+               "addx    %0, %%g0, %0"
+               : "=r" (csum)
+               : "r" (addend), "0" (csum));
+
+       return csum;
+}
+
 #endif /* !(__SPARC_CHECKSUM_H) */
index 019b9615e43c16b81230508109b2484d6313ee62..2ff81ae8f3afa17b0fd25a5a6786131a9e30bc1b 100644 (file)
@@ -164,4 +164,16 @@ static inline __sum16 ip_compute_csum(const void *buff, int len)
        return csum_fold(csum_partial(buff, len, 0));
 }
 
+#define HAVE_ARCH_CSUM_ADD
+static inline __wsum csum_add(__wsum csum, __wsum addend)
+{
+       __asm__ __volatile__(
+               "addcc   %0, %1, %0\n"
+               "addx    %0, %%g0, %0"
+               : "=r" (csum)
+               : "r" (addend), "0" (csum));
+
+       return csum;
+}
+
 #endif /* !(__SPARC64_CHECKSUM_H) */
index 29b0301c18aab26f2a613d397da9856a8d1eec3b..16eb63fac57de1395bc70507c3616f64af68ab4f 100644 (file)
@@ -58,14 +58,26 @@ tlb_gather_mmu(struct mmu_gather *tlb, struct mm_struct *mm, unsigned long start
 extern void flush_tlb_mm_range(struct mm_struct *mm, unsigned long start,
                               unsigned long end);
 
+static inline void
+tlb_flush_mmu_tlbonly(struct mmu_gather *tlb)
+{
+       flush_tlb_mm_range(tlb->mm, tlb->start, tlb->end);
+}
+
+static inline void
+tlb_flush_mmu_free(struct mmu_gather *tlb)
+{
+       init_tlb_gather(tlb);
+}
+
 static inline void
 tlb_flush_mmu(struct mmu_gather *tlb)
 {
        if (!tlb->need_flush)
                return;
 
-       flush_tlb_mm_range(tlb->mm, tlb->start, tlb->end);
-       init_tlb_gather(tlb);
+       tlb_flush_mmu_tlbonly(tlb);
+       tlb_flush_mmu_free(tlb);
 }
 
 /* tlb_finish_mmu
index 75298d3358e7f3d2c7ff4e2b2c5ed8b81a8dca6d..08eec0b691b061ade61d8f9df7258ea4bee49f42 100644 (file)
@@ -136,6 +136,7 @@ extern int os_ioctl_generic(int fd, unsigned int cmd, unsigned long arg);
 extern int os_get_ifname(int fd, char *namebuf);
 extern int os_set_slip(int fd);
 extern int os_mode_fd(int fd, int mode);
+extern int os_fsync_file(int fd);
 
 extern int os_seek_file(int fd, unsigned long long offset);
 extern int os_open_file(const char *file, struct openflags flags, int mode);
index f116db15d4028217767f2a7f51dc0bba38b151a9..30fdd5d0067b26c91fb8c831da5a1d4008c79e22 100644 (file)
@@ -103,6 +103,7 @@ void __init setup_physmem(unsigned long start, unsigned long reserve_end,
         */
        os_seek_file(physmem_fd, __pa(&__syscall_stub_start));
        os_write_file(physmem_fd, &__syscall_stub_start, PAGE_SIZE);
+       os_fsync_file(physmem_fd);
 
        bootmap_size = init_bootmem(pfn, pfn + delta);
        free_bootmem(__pa(reserve_end) + bootmap_size,
index 07a750197bb09d3b5ce59b631aed874d58603386..08d90fba952c3e1ba07e0fa488012aa26a6b473c 100644 (file)
@@ -237,6 +237,12 @@ void os_close_file(int fd)
 {
        close(fd);
 }
+int os_fsync_file(int fd)
+{
+       if (fsync(fd) < 0)
+           return -errno;
+       return 0;
+}
 
 int os_seek_file(int fd, unsigned long long offset)
 {
index e1704ff600ff9e677a98a4711d5c8b7b2ff8cf6a..df9191acd926cfb3b5a0c3582549105ac0729d5a 100644 (file)
@@ -151,6 +151,7 @@ int __init main(int argc, char **argv, char **envp)
 #endif
 
        do_uml_initcalls();
+       change_sig(SIGPIPE, 0);
        ret = linux_main(argc, argv);
 
        /*
index 3c4af77e51a2f9047ad603fe1032334111d08e9f..897e9ad0c108ed7ef70571628ed2fde039109508 100644 (file)
 #include <string.h>
 #include <sys/stat.h>
 #include <sys/mman.h>
-#include <sys/param.h>
+#include <sys/vfs.h>
+#include <linux/magic.h>
 #include <init.h>
 #include <os.h>
 
-/* Modified by which_tmpdir, which is called during early boot */
-static char *default_tmpdir = "/tmp";
-
-/*
- *  Modified when creating the physical memory file and when checking
- * the tmp filesystem for usability, both happening during early boot.
- */
+/* Set by make_tempfile() during early boot. */
 static char *tempdir = NULL;
 
-static void __init find_tempdir(void)
+/* Check if dir is on tmpfs. Return 0 if yes, -1 if no or error. */
+static int __init check_tmpfs(const char *dir)
 {
-       const char *dirs[] = { "TMP", "TEMP", "TMPDIR", NULL };
-       int i;
-       char *dir = NULL;
-
-       if (tempdir != NULL)
-               /* We've already been called */
-               return;
-       for (i = 0; dirs[i]; i++) {
-               dir = getenv(dirs[i]);
-               if ((dir != NULL) && (*dir != '\0'))
-                       break;
-       }
-       if ((dir == NULL) || (*dir == '\0'))
-               dir = default_tmpdir;
+       struct statfs st;
 
-       tempdir = malloc(strlen(dir) + 2);
-       if (tempdir == NULL) {
-               fprintf(stderr, "Failed to malloc tempdir, "
-                       "errno = %d\n", errno);
-               return;
-       }
-       strcpy(tempdir, dir);
-       strcat(tempdir, "/");
-}
-
-/*
- * Remove bytes from the front of the buffer and refill it so that if there's a
- * partial string that we care about, it will be completed, and we can recognize
- * it.
- */
-static int pop(int fd, char *buf, size_t size, size_t npop)
-{
-       ssize_t n;
-       size_t len = strlen(&buf[npop]);
-
-       memmove(buf, &buf[npop], len + 1);
-       n = read(fd, &buf[len], size - len - 1);
-       if (n < 0)
-               return -errno;
-
-       buf[len + n] = '\0';
-       return 1;
-}
-
-/*
- * This will return 1, with the first character in buf being the
- * character following the next instance of c in the file.  This will
- * read the file as needed.  If there's an error, -errno is returned;
- * if the end of the file is reached, 0 is returned.
- */
-static int next(int fd, char *buf, size_t size, char c)
-{
-       ssize_t n;
-       char *ptr;
-
-       while ((ptr = strchr(buf, c)) == NULL) {
-               n = read(fd, buf, size - 1);
-               if (n == 0)
-                       return 0;
-               else if (n < 0)
-                       return -errno;
-
-               buf[n] = '\0';
+       printf("Checking if %s is on tmpfs...", dir);
+       if (statfs(dir, &st) < 0) {
+               printf("%s\n", strerror(errno));
+       } else if (st.f_type != TMPFS_MAGIC) {
+               printf("no\n");
+       } else {
+               printf("OK\n");
+               return 0;
        }
-
-       return pop(fd, buf, size, ptr - buf + 1);
+       return -1;
 }
 
 /*
- * Decode an octal-escaped and space-terminated path of the form used by
- * /proc/mounts. May be used to decode a path in-place. "out" must be at least
- * as large as the input. The output is always null-terminated. "len" gets the
- * length of the output, excluding the trailing null. Returns 0 if a full path
- * was successfully decoded, otherwise an error.
+ * Choose the tempdir to use. We want something on tmpfs so that our memory is
+ * not subject to the host's vm.dirty_ratio. If a tempdir is specified in the
+ * environment, we use that even if it's not on tmpfs, but we warn the user.
+ * Otherwise, we try common tmpfs locations, and if no tmpfs directory is found
+ * then we fall back to /tmp.
  */
-static int decode_path(const char *in, char *out, size_t *len)
+static char * __init choose_tempdir(void)
 {
-       char *first = out;
-       int c;
+       static const char * const vars[] = {
+               "TMPDIR",
+               "TMP",
+               "TEMP",
+               NULL
+       };
+       static const char fallback_dir[] = "/tmp";
+       static const char * const tmpfs_dirs[] = {
+               "/dev/shm",
+               fallback_dir,
+               NULL
+       };
        int i;
-       int ret = -EINVAL;
-       while (1) {
-               switch (*in) {
-               case '\0':
-                       goto out;
-
-               case ' ':
-                       ret = 0;
-                       goto out;
-
-               case '\\':
-                       in++;
-                       c = 0;
-                       for (i = 0; i < 3; i++) {
-                               if (*in < '0' || *in > '7')
-                                       goto out;
-                               c = (c << 3) | (*in++ - '0');
-                       }
-                       *(unsigned char *)out++ = (unsigned char) c;
-                       break;
-
-               default:
-                       *out++ = *in++;
-                       break;
+       const char *dir;
+
+       printf("Checking environment variables for a tempdir...");
+       for (i = 0; vars[i]; i++) {
+               dir = getenv(vars[i]);
+               if ((dir != NULL) && (*dir != '\0')) {
+                       printf("%s\n", dir);
+                       if (check_tmpfs(dir) >= 0)
+                               goto done;
+                       else
+                               goto warn;
                }
        }
+       printf("none found\n");
 
-out:
-       *out = '\0';
-       *len = out - first;
-       return ret;
-}
-
-/*
- * Computes the length of s when encoded with three-digit octal escape sequences
- * for the characters in chars.
- */
-static size_t octal_encoded_length(const char *s, const char *chars)
-{
-       size_t len = strlen(s);
-       while ((s = strpbrk(s, chars)) != NULL) {
-               len += 3;
-               s++;
-       }
-
-       return len;
-}
-
-enum {
-       OUTCOME_NOTHING_MOUNTED,
-       OUTCOME_TMPFS_MOUNT,
-       OUTCOME_NON_TMPFS_MOUNT,
-};
-
-/* Read a line of /proc/mounts data looking for a tmpfs mount at "path". */
-static int read_mount(int fd, char *buf, size_t bufsize, const char *path,
-                     int *outcome)
-{
-       int found;
-       int match;
-       char *space;
-       size_t len;
-
-       enum {
-               MATCH_NONE,
-               MATCH_EXACT,
-               MATCH_PARENT,
-       };
-
-       found = next(fd, buf, bufsize, ' ');
-       if (found != 1)
-               return found;
-
-       /*
-        * If there's no following space in the buffer, then this path is
-        * truncated, so it can't be the one we're looking for.
-        */
-       space = strchr(buf, ' ');
-       if (space) {
-               match = MATCH_NONE;
-               if (!decode_path(buf, buf, &len)) {
-                       if (!strcmp(buf, path))
-                               match = MATCH_EXACT;
-                       else if (!strncmp(buf, path, len)
-                                && (path[len] == '/' || !strcmp(buf, "/")))
-                               match = MATCH_PARENT;
-               }
-
-               found = pop(fd, buf, bufsize, space - buf + 1);
-               if (found != 1)
-                       return found;
-
-               switch (match) {
-               case MATCH_EXACT:
-                       if (!strncmp(buf, "tmpfs", strlen("tmpfs")))
-                               *outcome = OUTCOME_TMPFS_MOUNT;
-                       else
-                               *outcome = OUTCOME_NON_TMPFS_MOUNT;
-                       break;
-
-               case MATCH_PARENT:
-                       /* This mount obscures any previous ones. */
-                       *outcome = OUTCOME_NOTHING_MOUNTED;
-                       break;
-               }
+       for (i = 0; tmpfs_dirs[i]; i++) {
+               dir = tmpfs_dirs[i];
+               if (check_tmpfs(dir) >= 0)
+                       goto done;
        }
 
-       return next(fd, buf, bufsize, '\n');
+       dir = fallback_dir;
+warn:
+       printf("Warning: tempdir %s is not on tmpfs\n", dir);
+done:
+       /* Make a copy since getenv results may not remain valid forever. */
+       return strdup(dir);
 }
 
-/* which_tmpdir is called only during early boot */
-static int checked_tmpdir = 0;
-
 /*
- * Look for a tmpfs mounted at /dev/shm.  I couldn't find a cleaner
- * way to do this than to parse /proc/mounts.  statfs will return the
- * same filesystem magic number and fs id for both /dev and /dev/shm
- * when they are both tmpfs, so you can't tell if they are different
- * filesystems.  Also, there seems to be no other way of finding the
- * mount point of a filesystem from within it.
- *
- * If a /dev/shm tmpfs entry is found, then we switch to using it.
- * Otherwise, we stay with the default /tmp.
+ * Create an unlinked tempfile in a suitable tempdir. template must be the
+ * basename part of the template with a leading '/'.
  */
-static void which_tmpdir(void)
+static int __init make_tempfile(const char *template)
 {
+       char *tempname;
        int fd;
-       int found;
-       int outcome;
-       char *path;
-       char *buf;
-       size_t bufsize;
 
-       if (checked_tmpdir)
-               return;
-
-       checked_tmpdir = 1;
-
-       printf("Checking for tmpfs mount on /dev/shm...");
-
-       path = realpath("/dev/shm", NULL);
-       if (!path) {
-               printf("failed to check real path, errno = %d\n", errno);
-               return;
-       }
-       printf("%s...", path);
-
-       /*
-        * The buffer needs to be able to fit the full octal-escaped path, a
-        * space, and a trailing null in order to successfully decode it.
-        */
-       bufsize = octal_encoded_length(path, " \t\n\\") + 2;
-
-       if (bufsize < 128)
-               bufsize = 128;
-
-       buf = malloc(bufsize);
-       if (!buf) {
-               printf("malloc failed, errno = %d\n", errno);
-               goto out;
-       }
-       buf[0] = '\0';
-
-       fd = open("/proc/mounts", O_RDONLY);
-       if (fd < 0) {
-               printf("failed to open /proc/mounts, errno = %d\n", errno);
-               goto out1;
-       }
-
-       outcome = OUTCOME_NOTHING_MOUNTED;
-       while (1) {
-               found = read_mount(fd, buf, bufsize, path, &outcome);
-               if (found != 1)
-                       break;
-       }
-
-       if (found < 0) {
-               printf("read returned errno %d\n", -found);
-       } else {
-               switch (outcome) {
-               case OUTCOME_TMPFS_MOUNT:
-                       printf("OK\n");
-                       default_tmpdir = "/dev/shm";
-                       break;
-
-               case OUTCOME_NON_TMPFS_MOUNT:
-                       printf("not tmpfs\n");
-                       break;
-
-               default:
-                       printf("nothing mounted on /dev/shm\n");
-                       break;
+       if (tempdir == NULL) {
+               tempdir = choose_tempdir();
+               if (tempdir == NULL) {
+                       fprintf(stderr, "Failed to choose tempdir: %s\n",
+                               strerror(errno));
+                       return -1;
                }
        }
 
-       close(fd);
-out1:
-       free(buf);
-out:
-       free(path);
-}
-
-static int __init make_tempfile(const char *template, char **out_tempname,
-                               int do_unlink)
-{
-       char *tempname;
-       int fd;
-
-       which_tmpdir();
-       tempname = malloc(MAXPATHLEN);
+       tempname = malloc(strlen(tempdir) + strlen(template) + 1);
        if (tempname == NULL)
                return -1;
 
-       find_tempdir();
-       if ((tempdir == NULL) || (strlen(tempdir) >= MAXPATHLEN))
-               goto out;
-
-       if (template[0] != '/')
-               strcpy(tempname, tempdir);
-       else
-               tempname[0] = '\0';
-       strncat(tempname, template, MAXPATHLEN-1-strlen(tempname));
+       strcpy(tempname, tempdir);
+       strcat(tempname, template);
        fd = mkstemp(tempname);
        if (fd < 0) {
                fprintf(stderr, "open - cannot create %s: %s\n", tempname,
                        strerror(errno));
                goto out;
        }
-       if (do_unlink && (unlink(tempname) < 0)) {
+       if (unlink(tempname) < 0) {
                perror("unlink");
                goto close;
        }
-       if (out_tempname) {
-               *out_tempname = tempname;
-       } else
-               free(tempname);
+       free(tempname);
        return fd;
 close:
        close(fd);
@@ -351,14 +131,14 @@ out:
        return -1;
 }
 
-#define TEMPNAME_TEMPLATE "vm_file-XXXXXX"
+#define TEMPNAME_TEMPLATE "/vm_file-XXXXXX"
 
 static int __init create_tmp_file(unsigned long long len)
 {
        int fd, err;
        char zero;
 
-       fd = make_tempfile(TEMPNAME_TEMPLATE, NULL, 1);
+       fd = make_tempfile(TEMPNAME_TEMPLATE);
        if (fd < 0)
                exit(1);
 
@@ -402,7 +182,6 @@ int __init create_mem_file(unsigned long long len)
        return fd;
 }
 
-
 void __init check_tmpexec(void)
 {
        void *addr;
@@ -410,14 +189,13 @@ void __init check_tmpexec(void)
 
        addr = mmap(NULL, UM_KERN_PAGE_SIZE,
                    PROT_READ | PROT_WRITE | PROT_EXEC, MAP_PRIVATE, fd, 0);
-       printf("Checking PROT_EXEC mmap in %s...",tempdir);
-       fflush(stdout);
+       printf("Checking PROT_EXEC mmap in %s...", tempdir);
        if (addr == MAP_FAILED) {
                err = errno;
-               perror("failed");
+               printf("%s\n", strerror(err));
                close(fd);
                if (err == EPERM)
-                       printf("%s must be not mounted noexec\n",tempdir);
+                       printf("%s must be not mounted noexec\n", tempdir);
                exit(1);
        }
        printf("OK\n");
index d1b7c377a234e900b0af97d7a784e5cfeedd64f0..ce6ad7e6a7d7c7ba743884bbee2bcf77262f814d 100644 (file)
@@ -83,7 +83,9 @@ else
         KBUILD_CFLAGS += -m64
 
         # Don't autogenerate traditional x87, MMX or SSE instructions
-        KBUILD_CFLAGS += -mno-mmx -mno-sse -mno-80387 -mno-fp-ret-in-387
+        KBUILD_CFLAGS += -mno-mmx -mno-sse
+        KBUILD_CFLAGS += $(call cc-option,-mno-80387)
+        KBUILD_CFLAGS += $(call cc-option,-mno-fp-ret-in-387)
 
        # Use -mpreferred-stack-boundary=3 if supported.
        KBUILD_CFLAGS += $(call cc-option,-mpreferred-stack-boundary=3)
index e6fd8a026c7be574e28d49d321d552d53c72b7cb..cd00e17744914c5b2e1d7d16da887a2809ebb5b5 100644 (file)
@@ -184,8 +184,15 @@ static inline unsigned add32_with_carry(unsigned a, unsigned b)
        asm("addl %2,%0\n\t"
            "adcl $0,%0"
            : "=r" (a)
-           : "0" (a), "r" (b));
+           : "0" (a), "rm" (b));
        return a;
 }
 
+#define HAVE_ARCH_CSUM_ADD
+static inline __wsum csum_add(__wsum csum, __wsum addend)
+{
+       return (__force __wsum)add32_with_carry((__force unsigned)csum,
+                                               (__force unsigned)addend);
+}
+
 #endif /* _ASM_X86_CHECKSUM_64_H */
index 6ad4658de7056e02f104b505f35910587ec712f3..d23aa82e7a7bc25c702be004f804a0c9d02c15f7 100644 (file)
@@ -3425,6 +3425,11 @@ int get_nr_irqs_gsi(void)
        return nr_irqs_gsi;
 }
 
+unsigned int arch_dynirq_lower_bound(unsigned int from)
+{
+       return from < nr_irqs_gsi ? nr_irqs_gsi : from;
+}
+
 int __init arch_probe_nr_irqs(void)
 {
        int nr;
index 7c87424d4140ee488eab90f8c4828a893a7744f6..619f7699487aa1ec60f5a2687bede8e19d0a2c7a 100644 (file)
@@ -543,7 +543,8 @@ static int rapl_cpu_prepare(int cpu)
        if (phys_id < 0)
                return -1;
 
-       if (!rdmsrl_safe(MSR_RAPL_POWER_UNIT, &msr_rapl_power_unit_bits))
+       /* protect rdmsrl() to handle virtualization */
+       if (rdmsrl_safe(MSR_RAPL_POWER_UNIT, &msr_rapl_power_unit_bits))
                return -1;
 
        pmu = kzalloc_node(sizeof(*pmu), GFP_KERNEL, cpu_to_node(cpu));
index f6584a90aba346566d38b6df763a9b0669fd733f..5edc34b5b9514df1f63af487a314ab4053d197a2 100644 (file)
@@ -26,6 +26,9 @@
 
 #define TOPOLOGY_REGISTER_OFFSET 0x10
 
+/* Flag below is initialized once during vSMP PCI initialization. */
+static int irq_routing_comply = 1;
+
 #if defined CONFIG_PCI && defined CONFIG_PARAVIRT
 /*
  * Interrupt control on vSMPowered systems:
@@ -101,6 +104,10 @@ static void __init set_vsmp_pv_ops(void)
 #ifdef CONFIG_SMP
        if (cap & ctl & BIT(8)) {
                ctl &= ~BIT(8);
+
+               /* Interrupt routing set to ignore */
+               irq_routing_comply = 0;
+
 #ifdef CONFIG_PROC_FS
                /* Don't let users change irq affinity via procfs */
                no_irq_affinity = 1;
@@ -218,7 +225,9 @@ static void vsmp_apic_post_init(void)
 {
        /* need to update phys_pkg_id */
        apic->phys_pkg_id = apicid_phys_pkg_id;
-       apic->vector_allocation_domain = fill_vector_allocation_domain;
+
+       if (!irq_routing_comply)
+               apic->vector_allocation_domain = fill_vector_allocation_domain;
 }
 
 void __init vsmp_init(void)
index 1f68c5831924d15dd741032cde2fafc46aae50ab..33e8c028842fb4b0b59bc269a973b195a104cdf8 100644 (file)
@@ -503,7 +503,7 @@ static inline struct vcpu_vmx *to_vmx(struct kvm_vcpu *vcpu)
                                [number##_HIGH] = VMCS12_OFFSET(name)+4
 
 
-static const unsigned long shadow_read_only_fields[] = {
+static unsigned long shadow_read_only_fields[] = {
        /*
         * We do NOT shadow fields that are modified when L0
         * traps and emulates any vmx instruction (e.g. VMPTRLD,
@@ -526,10 +526,10 @@ static const unsigned long shadow_read_only_fields[] = {
        GUEST_LINEAR_ADDRESS,
        GUEST_PHYSICAL_ADDRESS
 };
-static const int max_shadow_read_only_fields =
+static int max_shadow_read_only_fields =
        ARRAY_SIZE(shadow_read_only_fields);
 
-static const unsigned long shadow_read_write_fields[] = {
+static unsigned long shadow_read_write_fields[] = {
        GUEST_RIP,
        GUEST_RSP,
        GUEST_CR0,
@@ -558,7 +558,7 @@ static const unsigned long shadow_read_write_fields[] = {
        HOST_FS_SELECTOR,
        HOST_GS_SELECTOR
 };
-static const int max_shadow_read_write_fields =
+static int max_shadow_read_write_fields =
        ARRAY_SIZE(shadow_read_write_fields);
 
 static const unsigned short vmcs_field_to_offset_table[] = {
@@ -3009,6 +3009,41 @@ static void free_kvm_area(void)
        }
 }
 
+static void init_vmcs_shadow_fields(void)
+{
+       int i, j;
+
+       /* No checks for read only fields yet */
+
+       for (i = j = 0; i < max_shadow_read_write_fields; i++) {
+               switch (shadow_read_write_fields[i]) {
+               case GUEST_BNDCFGS:
+                       if (!vmx_mpx_supported())
+                               continue;
+                       break;
+               default:
+                       break;
+               }
+
+               if (j < i)
+                       shadow_read_write_fields[j] =
+                               shadow_read_write_fields[i];
+               j++;
+       }
+       max_shadow_read_write_fields = j;
+
+       /* shadowed fields guest access without vmexit */
+       for (i = 0; i < max_shadow_read_write_fields; i++) {
+               clear_bit(shadow_read_write_fields[i],
+                         vmx_vmwrite_bitmap);
+               clear_bit(shadow_read_write_fields[i],
+                         vmx_vmread_bitmap);
+       }
+       for (i = 0; i < max_shadow_read_only_fields; i++)
+               clear_bit(shadow_read_only_fields[i],
+                         vmx_vmread_bitmap);
+}
+
 static __init int alloc_kvm_area(void)
 {
        int cpu;
@@ -3039,6 +3074,8 @@ static __init int hardware_setup(void)
                enable_vpid = 0;
        if (!cpu_has_vmx_shadow_vmcs())
                enable_shadow_vmcs = 0;
+       if (enable_shadow_vmcs)
+               init_vmcs_shadow_fields();
 
        if (!cpu_has_vmx_ept() ||
            !cpu_has_vmx_ept_4levels()) {
@@ -8803,14 +8840,6 @@ static int __init vmx_init(void)
 
        memset(vmx_vmread_bitmap, 0xff, PAGE_SIZE);
        memset(vmx_vmwrite_bitmap, 0xff, PAGE_SIZE);
-       /* shadowed read/write fields */
-       for (i = 0; i < max_shadow_read_write_fields; i++) {
-               clear_bit(shadow_read_write_fields[i], vmx_vmwrite_bitmap);
-               clear_bit(shadow_read_write_fields[i], vmx_vmread_bitmap);
-       }
-       /* shadowed read only fields */
-       for (i = 0; i < max_shadow_read_only_fields; i++)
-               clear_bit(shadow_read_only_fields[i], vmx_vmread_bitmap);
 
        /*
         * Allow direct access to the PC debug port (it is often used for I/O
index 01495755701bd3d068db95df291ef71096d9cf33..6440221ced0d4925d3fee4a0c11b424a6b696f2d 100644 (file)
 
 /*
  * Calling convention :
- * rdi : skb pointer
+ * rbx : skb pointer (callee saved)
  * esi : offset of byte(s) to fetch in skb (can be scratched)
- * r : copy of skb->data
+ * r10 : copy of skb->data
  * r9d : hlen = skb->len - skb->data_len
  */
-#define SKBDATA        %r8
+#define SKBDATA        %r10
 #define SKF_MAX_NEG_OFF    $(-0x200000) /* SKF_LL_OFF from filter.h */
+#define MAX_BPF_STACK (512 /* from filter.h */ + \
+       32 /* space for rbx,r13,r14,r15 */ + \
+       8 /* space for skb_copy_bits */)
 
 sk_load_word:
        .globl  sk_load_word
@@ -68,53 +71,31 @@ sk_load_byte_positive_offset:
        movzbl  (SKBDATA,%rsi),%eax
        ret
 
-/**
- * sk_load_byte_msh - BPF_S_LDX_B_MSH helper
- *
- * Implements BPF_S_LDX_B_MSH : ldxb  4*([offset]&0xf)
- * Must preserve A accumulator (%eax)
- * Inputs : %esi is the offset value
- */
-sk_load_byte_msh:
-       .globl  sk_load_byte_msh
-       test    %esi,%esi
-       js      bpf_slow_path_byte_msh_neg
-
-sk_load_byte_msh_positive_offset:
-       .globl  sk_load_byte_msh_positive_offset
-       cmp     %esi,%r9d      /* if (offset >= hlen) goto bpf_slow_path_byte_msh */
-       jle     bpf_slow_path_byte_msh
-       movzbl  (SKBDATA,%rsi),%ebx
-       and     $15,%bl
-       shl     $2,%bl
-       ret
-
 /* rsi contains offset and can be scratched */
 #define bpf_slow_path_common(LEN)              \
-       push    %rdi;    /* save skb */         \
+       mov     %rbx, %rdi; /* arg1 == skb */   \
        push    %r9;                            \
        push    SKBDATA;                        \
 /* rsi already has offset */                   \
        mov     $LEN,%ecx;      /* len */       \
-       lea     -12(%rbp),%rdx;                 \
+       lea     - MAX_BPF_STACK + 32(%rbp),%rdx;                        \
        call    skb_copy_bits;                  \
        test    %eax,%eax;                      \
        pop     SKBDATA;                        \
-       pop     %r9;                            \
-       pop     %rdi
+       pop     %r9;
 
 
 bpf_slow_path_word:
        bpf_slow_path_common(4)
        js      bpf_error
-       mov     -12(%rbp),%eax
+       mov     - MAX_BPF_STACK + 32(%rbp),%eax
        bswap   %eax
        ret
 
 bpf_slow_path_half:
        bpf_slow_path_common(2)
        js      bpf_error
-       mov     -12(%rbp),%ax
+       mov     - MAX_BPF_STACK + 32(%rbp),%ax
        rol     $8,%ax
        movzwl  %ax,%eax
        ret
@@ -122,21 +103,11 @@ bpf_slow_path_half:
 bpf_slow_path_byte:
        bpf_slow_path_common(1)
        js      bpf_error
-       movzbl  -12(%rbp),%eax
-       ret
-
-bpf_slow_path_byte_msh:
-       xchg    %eax,%ebx /* dont lose A , X is about to be scratched */
-       bpf_slow_path_common(1)
-       js      bpf_error
-       movzbl  -12(%rbp),%eax
-       and     $15,%al
-       shl     $2,%al
-       xchg    %eax,%ebx
+       movzbl  - MAX_BPF_STACK + 32(%rbp),%eax
        ret
 
 #define sk_negative_common(SIZE)                               \
-       push    %rdi;   /* save skb */                          \
+       mov     %rbx, %rdi; /* arg1 == skb */                   \
        push    %r9;                                            \
        push    SKBDATA;                                        \
 /* rsi already has offset */                                   \
@@ -145,10 +116,8 @@ bpf_slow_path_byte_msh:
        test    %rax,%rax;                                      \
        pop     SKBDATA;                                        \
        pop     %r9;                                            \
-       pop     %rdi;                                           \
        jz      bpf_error
 
-
 bpf_slow_path_word_neg:
        cmp     SKF_MAX_NEG_OFF, %esi   /* test range */
        jl      bpf_error       /* offset lower -> error  */
@@ -179,22 +148,12 @@ sk_load_byte_negative_offset:
        movzbl  (%rax), %eax
        ret
 
-bpf_slow_path_byte_msh_neg:
-       cmp     SKF_MAX_NEG_OFF, %esi
-       jl      bpf_error
-sk_load_byte_msh_negative_offset:
-       .globl  sk_load_byte_msh_negative_offset
-       xchg    %eax,%ebx /* dont lose A , X is about to be scratched */
-       sk_negative_common(1)
-       movzbl  (%rax),%eax
-       and     $15,%al
-       shl     $2,%al
-       xchg    %eax,%ebx
-       ret
-
 bpf_error:
 # force a return 0 from jit handler
-       xor             %eax,%eax
-       mov             -8(%rbp),%rbx
+       xor     %eax,%eax
+       mov     - MAX_BPF_STACK(%rbp),%rbx
+       mov     - MAX_BPF_STACK + 8(%rbp),%r13
+       mov     - MAX_BPF_STACK + 16(%rbp),%r14
+       mov     - MAX_BPF_STACK + 24(%rbp),%r15
        leaveq
        ret
index dc017735bb91b7b2ec61f333b091c63accdb921b..92aef8fdac2f394cda85af8e98d68cfa6fcdd1c5 100644 (file)
@@ -1,6 +1,7 @@
 /* bpf_jit_comp.c : BPF JIT compiler
  *
  * Copyright (C) 2011-2013 Eric Dumazet (eric.dumazet@gmail.com)
+ * Internal BPF Copyright (c) 2011-2014 PLUMgrid, http://plumgrid.com
  *
  * This program is free software; you can redistribute it and/or
  * modify it under the terms of the GNU General Public License
 #include <linux/if_vlan.h>
 #include <linux/random.h>
 
-/*
- * Conventions :
- *  EAX : BPF A accumulator
- *  EBX : BPF X accumulator
- *  RDI : pointer to skb   (first argument given to JIT function)
- *  RBP : frame pointer (even if CONFIG_FRAME_POINTER=n)
- *  ECX,EDX,ESI : scratch registers
- *  r9d : skb->len - skb->data_len (headlen)
- *  r8  : skb->data
- * -8(RBP) : saved RBX value
- * -16(RBP)..-80(RBP) : BPF_MEMWORDS values
- */
 int bpf_jit_enable __read_mostly;
 
 /*
  * assembly code in arch/x86/net/bpf_jit.S
  */
-extern u8 sk_load_word[], sk_load_half[], sk_load_byte[], sk_load_byte_msh[];
+extern u8 sk_load_word[], sk_load_half[], sk_load_byte[];
 extern u8 sk_load_word_positive_offset[], sk_load_half_positive_offset[];
-extern u8 sk_load_byte_positive_offset[], sk_load_byte_msh_positive_offset[];
+extern u8 sk_load_byte_positive_offset[];
 extern u8 sk_load_word_negative_offset[], sk_load_half_negative_offset[];
-extern u8 sk_load_byte_negative_offset[], sk_load_byte_msh_negative_offset[];
+extern u8 sk_load_byte_negative_offset[];
 
 static inline u8 *emit_code(u8 *ptr, u32 bytes, unsigned int len)
 {
@@ -56,30 +45,44 @@ static inline u8 *emit_code(u8 *ptr, u32 bytes, unsigned int len)
 #define EMIT2(b1, b2)          EMIT((b1) + ((b2) << 8), 2)
 #define EMIT3(b1, b2, b3)      EMIT((b1) + ((b2) << 8) + ((b3) << 16), 3)
 #define EMIT4(b1, b2, b3, b4)   EMIT((b1) + ((b2) << 8) + ((b3) << 16) + ((b4) << 24), 4)
-#define EMIT1_off32(b1, off)   do { EMIT1(b1); EMIT(off, 4);} while (0)
-
-#define CLEAR_A() EMIT2(0x31, 0xc0) /* xor %eax,%eax */
-#define CLEAR_X() EMIT2(0x31, 0xdb) /* xor %ebx,%ebx */
+#define EMIT1_off32(b1, off) \
+       do {EMIT1(b1); EMIT(off, 4); } while (0)
+#define EMIT2_off32(b1, b2, off) \
+       do {EMIT2(b1, b2); EMIT(off, 4); } while (0)
+#define EMIT3_off32(b1, b2, b3, off) \
+       do {EMIT3(b1, b2, b3); EMIT(off, 4); } while (0)
+#define EMIT4_off32(b1, b2, b3, b4, off) \
+       do {EMIT4(b1, b2, b3, b4); EMIT(off, 4); } while (0)
 
 static inline bool is_imm8(int value)
 {
        return value <= 127 && value >= -128;
 }
 
-static inline bool is_near(int offset)
+static inline bool is_simm32(s64 value)
 {
-       return offset <= 127 && offset >= -128;
+       return value == (s64) (s32) value;
 }
 
-#define EMIT_JMP(offset)                                               \
-do {                                                                   \
-       if (offset) {                                                   \
-               if (is_near(offset))                                    \
-                       EMIT2(0xeb, offset); /* jmp .+off8 */           \
-               else                                                    \
-                       EMIT1_off32(0xe9, offset); /* jmp .+off32 */    \
-       }                                                               \
-} while (0)
+/* mov A, X */
+#define EMIT_mov(A, X) \
+       do {if (A != X) \
+               EMIT3(add_2mod(0x48, A, X), 0x89, add_2reg(0xC0, A, X)); \
+       } while (0)
+
+static int bpf_size_to_x86_bytes(int bpf_size)
+{
+       if (bpf_size == BPF_W)
+               return 4;
+       else if (bpf_size == BPF_H)
+               return 2;
+       else if (bpf_size == BPF_B)
+               return 1;
+       else if (bpf_size == BPF_DW)
+               return 4; /* imm32 */
+       else
+               return 0;
+}
 
 /* list of x86 cond jumps opcodes (. + s8)
  * Add 0x10 (and an extra 0x0f) to generate far jumps (. + s32)
@@ -90,27 +93,8 @@ do {                                                                 \
 #define X86_JNE 0x75
 #define X86_JBE 0x76
 #define X86_JA  0x77
-
-#define EMIT_COND_JMP(op, offset)                              \
-do {                                                           \
-       if (is_near(offset))                                    \
-               EMIT2(op, offset); /* jxx .+off8 */             \
-       else {                                                  \
-               EMIT2(0x0f, op + 0x10);                         \
-               EMIT(offset, 4); /* jxx .+off32 */              \
-       }                                                       \
-} while (0)
-
-#define COND_SEL(CODE, TOP, FOP)       \
-       case CODE:                      \
-               t_op = TOP;             \
-               f_op = FOP;             \
-               goto cond_branch
-
-
-#define SEEN_DATAREF 1 /* might call external helpers */
-#define SEEN_XREG    2 /* ebx is used */
-#define SEEN_MEM     4 /* use mem[] for temporary storage */
+#define X86_JGE 0x7D
+#define X86_JG  0x7F
 
 static inline void bpf_flush_icache(void *start, void *end)
 {
@@ -125,26 +109,6 @@ static inline void bpf_flush_icache(void *start, void *end)
 #define CHOOSE_LOAD_FUNC(K, func) \
        ((int)K < 0 ? ((int)K >= SKF_LL_OFF ? func##_negative_offset : func) : func##_positive_offset)
 
-/* Helper to find the offset of pkt_type in sk_buff
- * We want to make sure its still a 3bit field starting at a byte boundary.
- */
-#define PKT_TYPE_MAX 7
-static int pkt_type_offset(void)
-{
-       struct sk_buff skb_probe = {
-               .pkt_type = ~0,
-       };
-       char *ct = (char *)&skb_probe;
-       unsigned int off;
-
-       for (off = 0; off < sizeof(struct sk_buff); off++) {
-               if (ct[off] == PKT_TYPE_MAX)
-                       return off;
-       }
-       pr_err_once("Please fix pkt_type_offset(), as pkt_type couldn't be found\n");
-       return -1;
-}
-
 struct bpf_binary_header {
        unsigned int    pages;
        /* Note : for security reasons, bpf code will follow a randomly
@@ -178,583 +142,771 @@ static struct bpf_binary_header *bpf_alloc_binary(unsigned int proglen,
        return header;
 }
 
-void bpf_jit_compile(struct sk_filter *fp)
+/* pick a register outside of BPF range for JIT internal work */
+#define AUX_REG (MAX_BPF_REG + 1)
+
+/* the following table maps BPF registers to x64 registers.
+ * x64 register r12 is unused, since if used as base address register
+ * in load/store instructions, it always needs an extra byte of encoding
+ */
+static const int reg2hex[] = {
+       [BPF_REG_0] = 0,  /* rax */
+       [BPF_REG_1] = 7,  /* rdi */
+       [BPF_REG_2] = 6,  /* rsi */
+       [BPF_REG_3] = 2,  /* rdx */
+       [BPF_REG_4] = 1,  /* rcx */
+       [BPF_REG_5] = 0,  /* r8 */
+       [BPF_REG_6] = 3,  /* rbx callee saved */
+       [BPF_REG_7] = 5,  /* r13 callee saved */
+       [BPF_REG_8] = 6,  /* r14 callee saved */
+       [BPF_REG_9] = 7,  /* r15 callee saved */
+       [BPF_REG_FP] = 5, /* rbp readonly */
+       [AUX_REG] = 3,    /* r11 temp register */
+};
+
+/* is_ereg() == true if BPF register 'reg' maps to x64 r8..r15
+ * which need extra byte of encoding.
+ * rax,rcx,...,rbp have simpler encoding
+ */
+static inline bool is_ereg(u32 reg)
 {
-       u8 temp[64];
-       u8 *prog;
-       unsigned int proglen, oldproglen = 0;
-       int ilen, i;
-       int t_offset, f_offset;
-       u8 t_op, f_op, seen = 0, pass;
-       u8 *image = NULL;
-       struct bpf_binary_header *header = NULL;
-       u8 *func;
-       int pc_ret0 = -1; /* bpf index of first RET #0 instruction (if any) */
-       unsigned int cleanup_addr; /* epilogue code offset */
-       unsigned int *addrs;
-       const struct sock_filter *filter = fp->insns;
-       int flen = fp->len;
+       if (reg == BPF_REG_5 || reg == AUX_REG ||
+           (reg >= BPF_REG_7 && reg <= BPF_REG_9))
+               return true;
+       else
+               return false;
+}
 
-       if (!bpf_jit_enable)
-               return;
+/* add modifiers if 'reg' maps to x64 registers r8..r15 */
+static inline u8 add_1mod(u8 byte, u32 reg)
+{
+       if (is_ereg(reg))
+               byte |= 1;
+       return byte;
+}
 
-       addrs = kmalloc(flen * sizeof(*addrs), GFP_KERNEL);
-       if (addrs == NULL)
-               return;
+static inline u8 add_2mod(u8 byte, u32 r1, u32 r2)
+{
+       if (is_ereg(r1))
+               byte |= 1;
+       if (is_ereg(r2))
+               byte |= 4;
+       return byte;
+}
 
-       /* Before first pass, make a rough estimation of addrs[]
-        * each bpf instruction is translated to less than 64 bytes
+/* encode dest register 'a_reg' into x64 opcode 'byte' */
+static inline u8 add_1reg(u8 byte, u32 a_reg)
+{
+       return byte + reg2hex[a_reg];
+}
+
+/* encode dest 'a_reg' and src 'x_reg' registers into x64 opcode 'byte' */
+static inline u8 add_2reg(u8 byte, u32 a_reg, u32 x_reg)
+{
+       return byte + reg2hex[a_reg] + (reg2hex[x_reg] << 3);
+}
+
+struct jit_context {
+       unsigned int cleanup_addr; /* epilogue code offset */
+       bool seen_ld_abs;
+};
+
+static int do_jit(struct sk_filter *bpf_prog, int *addrs, u8 *image,
+                 int oldproglen, struct jit_context *ctx)
+{
+       struct sock_filter_int *insn = bpf_prog->insnsi;
+       int insn_cnt = bpf_prog->len;
+       u8 temp[64];
+       int i;
+       int proglen = 0;
+       u8 *prog = temp;
+       int stacksize = MAX_BPF_STACK +
+               32 /* space for rbx, r13, r14, r15 */ +
+               8 /* space for skb_copy_bits() buffer */;
+
+       EMIT1(0x55); /* push rbp */
+       EMIT3(0x48, 0x89, 0xE5); /* mov rbp,rsp */
+
+       /* sub rsp, stacksize */
+       EMIT3_off32(0x48, 0x81, 0xEC, stacksize);
+
+       /* all classic BPF filters use R6(rbx) save it */
+
+       /* mov qword ptr [rbp-X],rbx */
+       EMIT3_off32(0x48, 0x89, 0x9D, -stacksize);
+
+       /* sk_convert_filter() maps classic BPF register X to R7 and uses R8
+        * as temporary, so all tcpdump filters need to spill/fill R7(r13) and
+        * R8(r14). R9(r15) spill could be made conditional, but there is only
+        * one 'bpf_error' return path out of helper functions inside bpf_jit.S
+        * The overhead of extra spill is negligible for any filter other
+        * than synthetic ones. Therefore not worth adding complexity.
         */
-       for (proglen = 0, i = 0; i < flen; i++) {
-               proglen += 64;
-               addrs[i] = proglen;
+
+       /* mov qword ptr [rbp-X],r13 */
+       EMIT3_off32(0x4C, 0x89, 0xAD, -stacksize + 8);
+       /* mov qword ptr [rbp-X],r14 */
+       EMIT3_off32(0x4C, 0x89, 0xB5, -stacksize + 16);
+       /* mov qword ptr [rbp-X],r15 */
+       EMIT3_off32(0x4C, 0x89, 0xBD, -stacksize + 24);
+
+       /* clear A and X registers */
+       EMIT2(0x31, 0xc0); /* xor eax, eax */
+       EMIT3(0x4D, 0x31, 0xED); /* xor r13, r13 */
+
+       if (ctx->seen_ld_abs) {
+               /* r9d : skb->len - skb->data_len (headlen)
+                * r10 : skb->data
+                */
+               if (is_imm8(offsetof(struct sk_buff, len)))
+                       /* mov %r9d, off8(%rdi) */
+                       EMIT4(0x44, 0x8b, 0x4f,
+                             offsetof(struct sk_buff, len));
+               else
+                       /* mov %r9d, off32(%rdi) */
+                       EMIT3_off32(0x44, 0x8b, 0x8f,
+                                   offsetof(struct sk_buff, len));
+
+               if (is_imm8(offsetof(struct sk_buff, data_len)))
+                       /* sub %r9d, off8(%rdi) */
+                       EMIT4(0x44, 0x2b, 0x4f,
+                             offsetof(struct sk_buff, data_len));
+               else
+                       EMIT3_off32(0x44, 0x2b, 0x8f,
+                                   offsetof(struct sk_buff, data_len));
+
+               if (is_imm8(offsetof(struct sk_buff, data)))
+                       /* mov %r10, off8(%rdi) */
+                       EMIT4(0x4c, 0x8b, 0x57,
+                             offsetof(struct sk_buff, data));
+               else
+                       /* mov %r10, off32(%rdi) */
+                       EMIT3_off32(0x4c, 0x8b, 0x97,
+                                   offsetof(struct sk_buff, data));
        }
-       cleanup_addr = proglen; /* epilogue address */
 
-       for (pass = 0; pass < 10; pass++) {
-               u8 seen_or_pass0 = (pass == 0) ? (SEEN_XREG | SEEN_DATAREF | SEEN_MEM) : seen;
-               /* no prologue/epilogue for trivial filters (RET something) */
-               proglen = 0;
-               prog = temp;
+       for (i = 0; i < insn_cnt; i++, insn++) {
+               const s32 K = insn->imm;
+               u32 a_reg = insn->a_reg;
+               u32 x_reg = insn->x_reg;
+               u8 b1 = 0, b2 = 0, b3 = 0;
+               s64 jmp_offset;
+               u8 jmp_cond;
+               int ilen;
+               u8 *func;
+
+               switch (insn->code) {
+                       /* ALU */
+               case BPF_ALU | BPF_ADD | BPF_X:
+               case BPF_ALU | BPF_SUB | BPF_X:
+               case BPF_ALU | BPF_AND | BPF_X:
+               case BPF_ALU | BPF_OR | BPF_X:
+               case BPF_ALU | BPF_XOR | BPF_X:
+               case BPF_ALU64 | BPF_ADD | BPF_X:
+               case BPF_ALU64 | BPF_SUB | BPF_X:
+               case BPF_ALU64 | BPF_AND | BPF_X:
+               case BPF_ALU64 | BPF_OR | BPF_X:
+               case BPF_ALU64 | BPF_XOR | BPF_X:
+                       switch (BPF_OP(insn->code)) {
+                       case BPF_ADD: b2 = 0x01; break;
+                       case BPF_SUB: b2 = 0x29; break;
+                       case BPF_AND: b2 = 0x21; break;
+                       case BPF_OR: b2 = 0x09; break;
+                       case BPF_XOR: b2 = 0x31; break;
+                       }
+                       if (BPF_CLASS(insn->code) == BPF_ALU64)
+                               EMIT1(add_2mod(0x48, a_reg, x_reg));
+                       else if (is_ereg(a_reg) || is_ereg(x_reg))
+                               EMIT1(add_2mod(0x40, a_reg, x_reg));
+                       EMIT2(b2, add_2reg(0xC0, a_reg, x_reg));
+                       break;
 
-               if (seen_or_pass0) {
-                       EMIT4(0x55, 0x48, 0x89, 0xe5); /* push %rbp; mov %rsp,%rbp */
-                       EMIT4(0x48, 0x83, 0xec, 96);    /* subq  $96,%rsp       */
-                       /* note : must save %rbx in case bpf_error is hit */
-                       if (seen_or_pass0 & (SEEN_XREG | SEEN_DATAREF))
-                               EMIT4(0x48, 0x89, 0x5d, 0xf8); /* mov %rbx, -8(%rbp) */
-                       if (seen_or_pass0 & SEEN_XREG)
-                               CLEAR_X(); /* make sure we dont leek kernel memory */
-
-                       /*
-                        * If this filter needs to access skb data,
-                        * loads r9 and r8 with :
-                        *  r9 = skb->len - skb->data_len
-                        *  r8 = skb->data
+                       /* mov A, X */
+               case BPF_ALU64 | BPF_MOV | BPF_X:
+                       EMIT_mov(a_reg, x_reg);
+                       break;
+
+                       /* mov32 A, X */
+               case BPF_ALU | BPF_MOV | BPF_X:
+                       if (is_ereg(a_reg) || is_ereg(x_reg))
+                               EMIT1(add_2mod(0x40, a_reg, x_reg));
+                       EMIT2(0x89, add_2reg(0xC0, a_reg, x_reg));
+                       break;
+
+                       /* neg A */
+               case BPF_ALU | BPF_NEG:
+               case BPF_ALU64 | BPF_NEG:
+                       if (BPF_CLASS(insn->code) == BPF_ALU64)
+                               EMIT1(add_1mod(0x48, a_reg));
+                       else if (is_ereg(a_reg))
+                               EMIT1(add_1mod(0x40, a_reg));
+                       EMIT2(0xF7, add_1reg(0xD8, a_reg));
+                       break;
+
+               case BPF_ALU | BPF_ADD | BPF_K:
+               case BPF_ALU | BPF_SUB | BPF_K:
+               case BPF_ALU | BPF_AND | BPF_K:
+               case BPF_ALU | BPF_OR | BPF_K:
+               case BPF_ALU | BPF_XOR | BPF_K:
+               case BPF_ALU64 | BPF_ADD | BPF_K:
+               case BPF_ALU64 | BPF_SUB | BPF_K:
+               case BPF_ALU64 | BPF_AND | BPF_K:
+               case BPF_ALU64 | BPF_OR | BPF_K:
+               case BPF_ALU64 | BPF_XOR | BPF_K:
+                       if (BPF_CLASS(insn->code) == BPF_ALU64)
+                               EMIT1(add_1mod(0x48, a_reg));
+                       else if (is_ereg(a_reg))
+                               EMIT1(add_1mod(0x40, a_reg));
+
+                       switch (BPF_OP(insn->code)) {
+                       case BPF_ADD: b3 = 0xC0; break;
+                       case BPF_SUB: b3 = 0xE8; break;
+                       case BPF_AND: b3 = 0xE0; break;
+                       case BPF_OR: b3 = 0xC8; break;
+                       case BPF_XOR: b3 = 0xF0; break;
+                       }
+
+                       if (is_imm8(K))
+                               EMIT3(0x83, add_1reg(b3, a_reg), K);
+                       else
+                               EMIT2_off32(0x81, add_1reg(b3, a_reg), K);
+                       break;
+
+               case BPF_ALU64 | BPF_MOV | BPF_K:
+                       /* optimization: if imm32 is positive,
+                        * use 'mov eax, imm32' (which zero-extends imm32)
+                        * to save 2 bytes
                         */
-                       if (seen_or_pass0 & SEEN_DATAREF) {
-                               if (offsetof(struct sk_buff, len) <= 127)
-                                       /* mov    off8(%rdi),%r9d */
-                                       EMIT4(0x44, 0x8b, 0x4f, offsetof(struct sk_buff, len));
-                               else {
-                                       /* mov    off32(%rdi),%r9d */
-                                       EMIT3(0x44, 0x8b, 0x8f);
-                                       EMIT(offsetof(struct sk_buff, len), 4);
-                               }
-                               if (is_imm8(offsetof(struct sk_buff, data_len)))
-                                       /* sub    off8(%rdi),%r9d */
-                                       EMIT4(0x44, 0x2b, 0x4f, offsetof(struct sk_buff, data_len));
-                               else {
-                                       EMIT3(0x44, 0x2b, 0x8f);
-                                       EMIT(offsetof(struct sk_buff, data_len), 4);
-                               }
+                       if (K < 0) {
+                               /* 'mov rax, imm32' sign extends imm32 */
+                               b1 = add_1mod(0x48, a_reg);
+                               b2 = 0xC7;
+                               b3 = 0xC0;
+                               EMIT3_off32(b1, b2, add_1reg(b3, a_reg), K);
+                               break;
+                       }
 
-                               if (is_imm8(offsetof(struct sk_buff, data)))
-                                       /* mov off8(%rdi),%r8 */
-                                       EMIT4(0x4c, 0x8b, 0x47, offsetof(struct sk_buff, data));
-                               else {
-                                       /* mov off32(%rdi),%r8 */
-                                       EMIT3(0x4c, 0x8b, 0x87);
-                                       EMIT(offsetof(struct sk_buff, data), 4);
-                               }
+               case BPF_ALU | BPF_MOV | BPF_K:
+                       /* mov %eax, imm32 */
+                       if (is_ereg(a_reg))
+                               EMIT1(add_1mod(0x40, a_reg));
+                       EMIT1_off32(add_1reg(0xB8, a_reg), K);
+                       break;
+
+                       /* A %= X, A /= X, A %= K, A /= K */
+               case BPF_ALU | BPF_MOD | BPF_X:
+               case BPF_ALU | BPF_DIV | BPF_X:
+               case BPF_ALU | BPF_MOD | BPF_K:
+               case BPF_ALU | BPF_DIV | BPF_K:
+               case BPF_ALU64 | BPF_MOD | BPF_X:
+               case BPF_ALU64 | BPF_DIV | BPF_X:
+               case BPF_ALU64 | BPF_MOD | BPF_K:
+               case BPF_ALU64 | BPF_DIV | BPF_K:
+                       EMIT1(0x50); /* push rax */
+                       EMIT1(0x52); /* push rdx */
+
+                       if (BPF_SRC(insn->code) == BPF_X)
+                               /* mov r11, X */
+                               EMIT_mov(AUX_REG, x_reg);
+                       else
+                               /* mov r11, K */
+                               EMIT3_off32(0x49, 0xC7, 0xC3, K);
+
+                       /* mov rax, A */
+                       EMIT_mov(BPF_REG_0, a_reg);
+
+                       /* xor edx, edx
+                        * equivalent to 'xor rdx, rdx', but one byte less
+                        */
+                       EMIT2(0x31, 0xd2);
+
+                       if (BPF_SRC(insn->code) == BPF_X) {
+                               /* if (X == 0) return 0 */
+
+                               /* cmp r11, 0 */
+                               EMIT4(0x49, 0x83, 0xFB, 0x00);
+
+                               /* jne .+9 (skip over pop, pop, xor and jmp) */
+                               EMIT2(X86_JNE, 1 + 1 + 2 + 5);
+                               EMIT1(0x5A); /* pop rdx */
+                               EMIT1(0x58); /* pop rax */
+                               EMIT2(0x31, 0xc0); /* xor eax, eax */
+
+                               /* jmp cleanup_addr
+                                * addrs[i] - 11, because there are 11 bytes
+                                * after this insn: div, mov, pop, pop, mov
+                                */
+                               jmp_offset = ctx->cleanup_addr - (addrs[i] - 11);
+                               EMIT1_off32(0xE9, jmp_offset);
                        }
-               }
 
-               switch (filter[0].code) {
-               case BPF_S_RET_K:
-               case BPF_S_LD_W_LEN:
-               case BPF_S_ANC_PROTOCOL:
-               case BPF_S_ANC_IFINDEX:
-               case BPF_S_ANC_MARK:
-               case BPF_S_ANC_RXHASH:
-               case BPF_S_ANC_CPU:
-               case BPF_S_ANC_VLAN_TAG:
-               case BPF_S_ANC_VLAN_TAG_PRESENT:
-               case BPF_S_ANC_QUEUE:
-               case BPF_S_ANC_PKTTYPE:
-               case BPF_S_LD_W_ABS:
-               case BPF_S_LD_H_ABS:
-               case BPF_S_LD_B_ABS:
-                       /* first instruction sets A register (or is RET 'constant') */
+                       if (BPF_CLASS(insn->code) == BPF_ALU64)
+                               /* div r11 */
+                               EMIT3(0x49, 0xF7, 0xF3);
+                       else
+                               /* div r11d */
+                               EMIT3(0x41, 0xF7, 0xF3);
+
+                       if (BPF_OP(insn->code) == BPF_MOD)
+                               /* mov r11, rdx */
+                               EMIT3(0x49, 0x89, 0xD3);
+                       else
+                               /* mov r11, rax */
+                               EMIT3(0x49, 0x89, 0xC3);
+
+                       EMIT1(0x5A); /* pop rdx */
+                       EMIT1(0x58); /* pop rax */
+
+                       /* mov A, r11 */
+                       EMIT_mov(a_reg, AUX_REG);
                        break;
-               default:
-                       /* make sure we dont leak kernel information to user */
-                       CLEAR_A(); /* A = 0 */
-               }
 
-               for (i = 0; i < flen; i++) {
-                       unsigned int K = filter[i].k;
+               case BPF_ALU | BPF_MUL | BPF_K:
+               case BPF_ALU | BPF_MUL | BPF_X:
+               case BPF_ALU64 | BPF_MUL | BPF_K:
+               case BPF_ALU64 | BPF_MUL | BPF_X:
+                       EMIT1(0x50); /* push rax */
+                       EMIT1(0x52); /* push rdx */
+
+                       /* mov r11, A */
+                       EMIT_mov(AUX_REG, a_reg);
+
+                       if (BPF_SRC(insn->code) == BPF_X)
+                               /* mov rax, X */
+                               EMIT_mov(BPF_REG_0, x_reg);
+                       else
+                               /* mov rax, K */
+                               EMIT3_off32(0x48, 0xC7, 0xC0, K);
+
+                       if (BPF_CLASS(insn->code) == BPF_ALU64)
+                               EMIT1(add_1mod(0x48, AUX_REG));
+                       else if (is_ereg(AUX_REG))
+                               EMIT1(add_1mod(0x40, AUX_REG));
+                       /* mul(q) r11 */
+                       EMIT2(0xF7, add_1reg(0xE0, AUX_REG));
+
+                       /* mov r11, rax */
+                       EMIT_mov(AUX_REG, BPF_REG_0);
+
+                       EMIT1(0x5A); /* pop rdx */
+                       EMIT1(0x58); /* pop rax */
+
+                       /* mov A, r11 */
+                       EMIT_mov(a_reg, AUX_REG);
+                       break;
 
-                       switch (filter[i].code) {
-                       case BPF_S_ALU_ADD_X: /* A += X; */
-                               seen |= SEEN_XREG;
-                               EMIT2(0x01, 0xd8);              /* add %ebx,%eax */
-                               break;
-                       case BPF_S_ALU_ADD_K: /* A += K; */
-                               if (!K)
-                                       break;
-                               if (is_imm8(K))
-                                       EMIT3(0x83, 0xc0, K);   /* add imm8,%eax */
-                               else
-                                       EMIT1_off32(0x05, K);   /* add imm32,%eax */
-                               break;
-                       case BPF_S_ALU_SUB_X: /* A -= X; */
-                               seen |= SEEN_XREG;
-                               EMIT2(0x29, 0xd8);              /* sub    %ebx,%eax */
-                               break;
-                       case BPF_S_ALU_SUB_K: /* A -= K */
-                               if (!K)
-                                       break;
-                               if (is_imm8(K))
-                                       EMIT3(0x83, 0xe8, K); /* sub imm8,%eax */
-                               else
-                                       EMIT1_off32(0x2d, K); /* sub imm32,%eax */
-                               break;
-                       case BPF_S_ALU_MUL_X: /* A *= X; */
-                               seen |= SEEN_XREG;
-                               EMIT3(0x0f, 0xaf, 0xc3);        /* imul %ebx,%eax */
-                               break;
-                       case BPF_S_ALU_MUL_K: /* A *= K */
-                               if (is_imm8(K))
-                                       EMIT3(0x6b, 0xc0, K); /* imul imm8,%eax,%eax */
-                               else {
-                                       EMIT2(0x69, 0xc0);              /* imul imm32,%eax */
-                                       EMIT(K, 4);
-                               }
-                               break;
-                       case BPF_S_ALU_DIV_X: /* A /= X; */
-                               seen |= SEEN_XREG;
-                               EMIT2(0x85, 0xdb);      /* test %ebx,%ebx */
-                               if (pc_ret0 > 0) {
-                                       /* addrs[pc_ret0 - 1] is start address of target
-                                        * (addrs[i] - 4) is the address following this jmp
-                                        * ("xor %edx,%edx; div %ebx" being 4 bytes long)
-                                        */
-                                       EMIT_COND_JMP(X86_JE, addrs[pc_ret0 - 1] -
-                                                               (addrs[i] - 4));
-                               } else {
-                                       EMIT_COND_JMP(X86_JNE, 2 + 5);
-                                       CLEAR_A();
-                                       EMIT1_off32(0xe9, cleanup_addr - (addrs[i] - 4)); /* jmp .+off32 */
-                               }
-                               EMIT4(0x31, 0xd2, 0xf7, 0xf3); /* xor %edx,%edx; div %ebx */
-                               break;
-                       case BPF_S_ALU_MOD_X: /* A %= X; */
-                               seen |= SEEN_XREG;
-                               EMIT2(0x85, 0xdb);      /* test %ebx,%ebx */
-                               if (pc_ret0 > 0) {
-                                       /* addrs[pc_ret0 - 1] is start address of target
-                                        * (addrs[i] - 6) is the address following this jmp
-                                        * ("xor %edx,%edx; div %ebx;mov %edx,%eax" being 6 bytes long)
-                                        */
-                                       EMIT_COND_JMP(X86_JE, addrs[pc_ret0 - 1] -
-                                                               (addrs[i] - 6));
-                               } else {
-                                       EMIT_COND_JMP(X86_JNE, 2 + 5);
-                                       CLEAR_A();
-                                       EMIT1_off32(0xe9, cleanup_addr - (addrs[i] - 6)); /* jmp .+off32 */
-                               }
-                               EMIT2(0x31, 0xd2);      /* xor %edx,%edx */
-                               EMIT2(0xf7, 0xf3);      /* div %ebx */
-                               EMIT2(0x89, 0xd0);      /* mov %edx,%eax */
-                               break;
-                       case BPF_S_ALU_MOD_K: /* A %= K; */
-                               if (K == 1) {
-                                       CLEAR_A();
-                                       break;
-                               }
-                               EMIT2(0x31, 0xd2);      /* xor %edx,%edx */
-                               EMIT1(0xb9);EMIT(K, 4); /* mov imm32,%ecx */
-                               EMIT2(0xf7, 0xf1);      /* div %ecx */
-                               EMIT2(0x89, 0xd0);      /* mov %edx,%eax */
-                               break;
-                       case BPF_S_ALU_DIV_K: /* A /= K */
-                               if (K == 1)
-                                       break;
-                               EMIT2(0x31, 0xd2);      /* xor %edx,%edx */
-                               EMIT1(0xb9);EMIT(K, 4); /* mov imm32,%ecx */
-                               EMIT2(0xf7, 0xf1);      /* div %ecx */
-                               break;
-                       case BPF_S_ALU_AND_X:
-                               seen |= SEEN_XREG;
-                               EMIT2(0x21, 0xd8);              /* and %ebx,%eax */
-                               break;
-                       case BPF_S_ALU_AND_K:
-                               if (K >= 0xFFFFFF00) {
-                                       EMIT2(0x24, K & 0xFF); /* and imm8,%al */
-                               } else if (K >= 0xFFFF0000) {
-                                       EMIT2(0x66, 0x25);      /* and imm16,%ax */
-                                       EMIT(K, 2);
-                               } else {
-                                       EMIT1_off32(0x25, K);   /* and imm32,%eax */
-                               }
-                               break;
-                       case BPF_S_ALU_OR_X:
-                               seen |= SEEN_XREG;
-                               EMIT2(0x09, 0xd8);              /* or %ebx,%eax */
-                               break;
-                       case BPF_S_ALU_OR_K:
-                               if (is_imm8(K))
-                                       EMIT3(0x83, 0xc8, K); /* or imm8,%eax */
-                               else
-                                       EMIT1_off32(0x0d, K);   /* or imm32,%eax */
-                               break;
-                       case BPF_S_ANC_ALU_XOR_X: /* A ^= X; */
-                       case BPF_S_ALU_XOR_X:
-                               seen |= SEEN_XREG;
-                               EMIT2(0x31, 0xd8);              /* xor %ebx,%eax */
-                               break;
-                       case BPF_S_ALU_XOR_K: /* A ^= K; */
-                               if (K == 0)
-                                       break;
-                               if (is_imm8(K))
-                                       EMIT3(0x83, 0xf0, K);   /* xor imm8,%eax */
-                               else
-                                       EMIT1_off32(0x35, K);   /* xor imm32,%eax */
-                               break;
-                       case BPF_S_ALU_LSH_X: /* A <<= X; */
-                               seen |= SEEN_XREG;
-                               EMIT4(0x89, 0xd9, 0xd3, 0xe0);  /* mov %ebx,%ecx; shl %cl,%eax */
-                               break;
-                       case BPF_S_ALU_LSH_K:
-                               if (K == 0)
-                                       break;
-                               else if (K == 1)
-                                       EMIT2(0xd1, 0xe0); /* shl %eax */
-                               else
-                                       EMIT3(0xc1, 0xe0, K);
-                               break;
-                       case BPF_S_ALU_RSH_X: /* A >>= X; */
-                               seen |= SEEN_XREG;
-                               EMIT4(0x89, 0xd9, 0xd3, 0xe8);  /* mov %ebx,%ecx; shr %cl,%eax */
-                               break;
-                       case BPF_S_ALU_RSH_K: /* A >>= K; */
-                               if (K == 0)
-                                       break;
-                               else if (K == 1)
-                                       EMIT2(0xd1, 0xe8); /* shr %eax */
-                               else
-                                       EMIT3(0xc1, 0xe8, K);
-                               break;
-                       case BPF_S_ALU_NEG:
-                               EMIT2(0xf7, 0xd8);              /* neg %eax */
-                               break;
-                       case BPF_S_RET_K:
-                               if (!K) {
-                                       if (pc_ret0 == -1)
-                                               pc_ret0 = i;
-                                       CLEAR_A();
-                               } else {
-                                       EMIT1_off32(0xb8, K);   /* mov $imm32,%eax */
-                               }
-                               /* fallinto */
-                       case BPF_S_RET_A:
-                               if (seen_or_pass0) {
-                                       if (i != flen - 1) {
-                                               EMIT_JMP(cleanup_addr - addrs[i]);
-                                               break;
-                                       }
-                                       if (seen_or_pass0 & SEEN_XREG)
-                                               EMIT4(0x48, 0x8b, 0x5d, 0xf8);  /* mov  -8(%rbp),%rbx */
-                                       EMIT1(0xc9);            /* leaveq */
-                               }
-                               EMIT1(0xc3);            /* ret */
-                               break;
-                       case BPF_S_MISC_TAX: /* X = A */
-                               seen |= SEEN_XREG;
-                               EMIT2(0x89, 0xc3);      /* mov    %eax,%ebx */
-                               break;
-                       case BPF_S_MISC_TXA: /* A = X */
-                               seen |= SEEN_XREG;
-                               EMIT2(0x89, 0xd8);      /* mov    %ebx,%eax */
-                               break;
-                       case BPF_S_LD_IMM: /* A = K */
-                               if (!K)
-                                       CLEAR_A();
-                               else
-                                       EMIT1_off32(0xb8, K); /* mov $imm32,%eax */
-                               break;
-                       case BPF_S_LDX_IMM: /* X = K */
-                               seen |= SEEN_XREG;
-                               if (!K)
-                                       CLEAR_X();
+                       /* shifts */
+               case BPF_ALU | BPF_LSH | BPF_K:
+               case BPF_ALU | BPF_RSH | BPF_K:
+               case BPF_ALU | BPF_ARSH | BPF_K:
+               case BPF_ALU64 | BPF_LSH | BPF_K:
+               case BPF_ALU64 | BPF_RSH | BPF_K:
+               case BPF_ALU64 | BPF_ARSH | BPF_K:
+                       if (BPF_CLASS(insn->code) == BPF_ALU64)
+                               EMIT1(add_1mod(0x48, a_reg));
+                       else if (is_ereg(a_reg))
+                               EMIT1(add_1mod(0x40, a_reg));
+
+                       switch (BPF_OP(insn->code)) {
+                       case BPF_LSH: b3 = 0xE0; break;
+                       case BPF_RSH: b3 = 0xE8; break;
+                       case BPF_ARSH: b3 = 0xF8; break;
+                       }
+                       EMIT3(0xC1, add_1reg(b3, a_reg), K);
+                       break;
+
+               case BPF_ALU | BPF_END | BPF_FROM_BE:
+                       switch (K) {
+                       case 16:
+                               /* emit 'ror %ax, 8' to swap lower 2 bytes */
+                               EMIT1(0x66);
+                               if (is_ereg(a_reg))
+                                       EMIT1(0x41);
+                               EMIT3(0xC1, add_1reg(0xC8, a_reg), 8);
+                               break;
+                       case 32:
+                               /* emit 'bswap eax' to swap lower 4 bytes */
+                               if (is_ereg(a_reg))
+                                       EMIT2(0x41, 0x0F);
                                else
-                                       EMIT1_off32(0xbb, K); /* mov $imm32,%ebx */
-                               break;
-                       case BPF_S_LD_MEM: /* A = mem[K] : mov off8(%rbp),%eax */
-                               seen |= SEEN_MEM;
-                               EMIT3(0x8b, 0x45, 0xf0 - K*4);
-                               break;
-                       case BPF_S_LDX_MEM: /* X = mem[K] : mov off8(%rbp),%ebx */
-                               seen |= SEEN_XREG | SEEN_MEM;
-                               EMIT3(0x8b, 0x5d, 0xf0 - K*4);
-                               break;
-                       case BPF_S_ST: /* mem[K] = A : mov %eax,off8(%rbp) */
-                               seen |= SEEN_MEM;
-                               EMIT3(0x89, 0x45, 0xf0 - K*4);
-                               break;
-                       case BPF_S_STX: /* mem[K] = X : mov %ebx,off8(%rbp) */
-                               seen |= SEEN_XREG | SEEN_MEM;
-                               EMIT3(0x89, 0x5d, 0xf0 - K*4);
-                               break;
-                       case BPF_S_LD_W_LEN: /* A = skb->len; */
-                               BUILD_BUG_ON(FIELD_SIZEOF(struct sk_buff, len) != 4);
-                               if (is_imm8(offsetof(struct sk_buff, len)))
-                                       /* mov    off8(%rdi),%eax */
-                                       EMIT3(0x8b, 0x47, offsetof(struct sk_buff, len));
-                               else {
-                                       EMIT2(0x8b, 0x87);
-                                       EMIT(offsetof(struct sk_buff, len), 4);
-                               }
-                               break;
-                       case BPF_S_LDX_W_LEN: /* X = skb->len; */
-                               seen |= SEEN_XREG;
-                               if (is_imm8(offsetof(struct sk_buff, len)))
-                                       /* mov off8(%rdi),%ebx */
-                                       EMIT3(0x8b, 0x5f, offsetof(struct sk_buff, len));
-                               else {
-                                       EMIT2(0x8b, 0x9f);
-                                       EMIT(offsetof(struct sk_buff, len), 4);
-                               }
-                               break;
-                       case BPF_S_ANC_PROTOCOL: /* A = ntohs(skb->protocol); */
-                               BUILD_BUG_ON(FIELD_SIZEOF(struct sk_buff, protocol) != 2);
-                               if (is_imm8(offsetof(struct sk_buff, protocol))) {
-                                       /* movzwl off8(%rdi),%eax */
-                                       EMIT4(0x0f, 0xb7, 0x47, offsetof(struct sk_buff, protocol));
-                               } else {
-                                       EMIT3(0x0f, 0xb7, 0x87); /* movzwl off32(%rdi),%eax */
-                                       EMIT(offsetof(struct sk_buff, protocol), 4);
-                               }
-                               EMIT2(0x86, 0xc4); /* ntohs() : xchg   %al,%ah */
-                               break;
-                       case BPF_S_ANC_IFINDEX:
-                               if (is_imm8(offsetof(struct sk_buff, dev))) {
-                                       /* movq off8(%rdi),%rax */
-                                       EMIT4(0x48, 0x8b, 0x47, offsetof(struct sk_buff, dev));
-                               } else {
-                                       EMIT3(0x48, 0x8b, 0x87); /* movq off32(%rdi),%rax */
-                                       EMIT(offsetof(struct sk_buff, dev), 4);
-                               }
-                               EMIT3(0x48, 0x85, 0xc0);        /* test %rax,%rax */
-                               EMIT_COND_JMP(X86_JE, cleanup_addr - (addrs[i] - 6));
-                               BUILD_BUG_ON(FIELD_SIZEOF(struct net_device, ifindex) != 4);
-                               EMIT2(0x8b, 0x80);      /* mov off32(%rax),%eax */
-                               EMIT(offsetof(struct net_device, ifindex), 4);
+                                       EMIT1(0x0F);
+                               EMIT1(add_1reg(0xC8, a_reg));
                                break;
-                       case BPF_S_ANC_MARK:
-                               BUILD_BUG_ON(FIELD_SIZEOF(struct sk_buff, mark) != 4);
-                               if (is_imm8(offsetof(struct sk_buff, mark))) {
-                                       /* mov off8(%rdi),%eax */
-                                       EMIT3(0x8b, 0x47, offsetof(struct sk_buff, mark));
-                               } else {
-                                       EMIT2(0x8b, 0x87);
-                                       EMIT(offsetof(struct sk_buff, mark), 4);
-                               }
-                               break;
-                       case BPF_S_ANC_RXHASH:
-                               BUILD_BUG_ON(FIELD_SIZEOF(struct sk_buff, hash) != 4);
-                               if (is_imm8(offsetof(struct sk_buff, hash))) {
-                                       /* mov off8(%rdi),%eax */
-                                       EMIT3(0x8b, 0x47, offsetof(struct sk_buff, hash));
-                               } else {
-                                       EMIT2(0x8b, 0x87);
-                                       EMIT(offsetof(struct sk_buff, hash), 4);
-                               }
-                               break;
-                       case BPF_S_ANC_QUEUE:
-                               BUILD_BUG_ON(FIELD_SIZEOF(struct sk_buff, queue_mapping) != 2);
-                               if (is_imm8(offsetof(struct sk_buff, queue_mapping))) {
-                                       /* movzwl off8(%rdi),%eax */
-                                       EMIT4(0x0f, 0xb7, 0x47, offsetof(struct sk_buff, queue_mapping));
-                               } else {
-                                       EMIT3(0x0f, 0xb7, 0x87); /* movzwl off32(%rdi),%eax */
-                                       EMIT(offsetof(struct sk_buff, queue_mapping), 4);
-                               }
-                               break;
-                       case BPF_S_ANC_CPU:
-#ifdef CONFIG_SMP
-                               EMIT4(0x65, 0x8b, 0x04, 0x25); /* mov %gs:off32,%eax */
-                               EMIT((u32)(unsigned long)&cpu_number, 4); /* A = smp_processor_id(); */
-#else
-                               CLEAR_A();
-#endif
-                               break;
-                       case BPF_S_ANC_VLAN_TAG:
-                       case BPF_S_ANC_VLAN_TAG_PRESENT:
-                               BUILD_BUG_ON(FIELD_SIZEOF(struct sk_buff, vlan_tci) != 2);
-                               if (is_imm8(offsetof(struct sk_buff, vlan_tci))) {
-                                       /* movzwl off8(%rdi),%eax */
-                                       EMIT4(0x0f, 0xb7, 0x47, offsetof(struct sk_buff, vlan_tci));
-                               } else {
-                                       EMIT3(0x0f, 0xb7, 0x87); /* movzwl off32(%rdi),%eax */
-                                       EMIT(offsetof(struct sk_buff, vlan_tci), 4);
-                               }
-                               BUILD_BUG_ON(VLAN_TAG_PRESENT != 0x1000);
-                               if (filter[i].code == BPF_S_ANC_VLAN_TAG) {
-                                       EMIT3(0x80, 0xe4, 0xef); /* and    $0xef,%ah */
-                               } else {
-                                       EMIT3(0xc1, 0xe8, 0x0c); /* shr    $0xc,%eax */
-                                       EMIT3(0x83, 0xe0, 0x01); /* and    $0x1,%eax */
-                               }
-                               break;
-                       case BPF_S_ANC_PKTTYPE:
-                       {
-                               int off = pkt_type_offset();
-
-                               if (off < 0)
-                                       goto out;
-                               if (is_imm8(off)) {
-                                       /* movzbl off8(%rdi),%eax */
-                                       EMIT4(0x0f, 0xb6, 0x47, off);
-                               } else {
-                                       /* movbl off32(%rdi),%eax */
-                                       EMIT3(0x0f, 0xb6, 0x87);
-                                       EMIT(off, 4);
-                               }
-                               EMIT3(0x83, 0xe0, PKT_TYPE_MAX); /* and    $0x7,%eax */
+                       case 64:
+                               /* emit 'bswap rax' to swap 8 bytes */
+                               EMIT3(add_1mod(0x48, a_reg), 0x0F,
+                                     add_1reg(0xC8, a_reg));
                                break;
                        }
-                       case BPF_S_LD_W_ABS:
-                               func = CHOOSE_LOAD_FUNC(K, sk_load_word);
-common_load:                   seen |= SEEN_DATAREF;
-                               t_offset = func - (image + addrs[i]);
-                               EMIT1_off32(0xbe, K); /* mov imm32,%esi */
-                               EMIT1_off32(0xe8, t_offset); /* call */
-                               break;
-                       case BPF_S_LD_H_ABS:
-                               func = CHOOSE_LOAD_FUNC(K, sk_load_half);
-                               goto common_load;
-                       case BPF_S_LD_B_ABS:
-                               func = CHOOSE_LOAD_FUNC(K, sk_load_byte);
-                               goto common_load;
-                       case BPF_S_LDX_B_MSH:
-                               func = CHOOSE_LOAD_FUNC(K, sk_load_byte_msh);
-                               seen |= SEEN_DATAREF | SEEN_XREG;
-                               t_offset = func - (image + addrs[i]);
-                               EMIT1_off32(0xbe, K);   /* mov imm32,%esi */
-                               EMIT1_off32(0xe8, t_offset); /* call sk_load_byte_msh */
-                               break;
-                       case BPF_S_LD_W_IND:
-                               func = sk_load_word;
-common_load_ind:               seen |= SEEN_DATAREF | SEEN_XREG;
-                               t_offset = func - (image + addrs[i]);
-                               if (K) {
-                                       if (is_imm8(K)) {
-                                               EMIT3(0x8d, 0x73, K); /* lea imm8(%rbx), %esi */
-                                       } else {
-                                               EMIT2(0x8d, 0xb3); /* lea imm32(%rbx),%esi */
-                                               EMIT(K, 4);
-                                       }
-                               } else {
-                                       EMIT2(0x89,0xde); /* mov %ebx,%esi */
-                               }
-                               EMIT1_off32(0xe8, t_offset);    /* call sk_load_xxx_ind */
-                               break;
-                       case BPF_S_LD_H_IND:
-                               func = sk_load_half;
-                               goto common_load_ind;
-                       case BPF_S_LD_B_IND:
-                               func = sk_load_byte;
-                               goto common_load_ind;
-                       case BPF_S_JMP_JA:
-                               t_offset = addrs[i + K] - addrs[i];
-                               EMIT_JMP(t_offset);
-                               break;
-                       COND_SEL(BPF_S_JMP_JGT_K, X86_JA, X86_JBE);
-                       COND_SEL(BPF_S_JMP_JGE_K, X86_JAE, X86_JB);
-                       COND_SEL(BPF_S_JMP_JEQ_K, X86_JE, X86_JNE);
-                       COND_SEL(BPF_S_JMP_JSET_K,X86_JNE, X86_JE);
-                       COND_SEL(BPF_S_JMP_JGT_X, X86_JA, X86_JBE);
-                       COND_SEL(BPF_S_JMP_JGE_X, X86_JAE, X86_JB);
-                       COND_SEL(BPF_S_JMP_JEQ_X, X86_JE, X86_JNE);
-                       COND_SEL(BPF_S_JMP_JSET_X,X86_JNE, X86_JE);
-
-cond_branch:                   f_offset = addrs[i + filter[i].jf] - addrs[i];
-                               t_offset = addrs[i + filter[i].jt] - addrs[i];
-
-                               /* same targets, can avoid doing the test :) */
-                               if (filter[i].jt == filter[i].jf) {
-                                       EMIT_JMP(t_offset);
-                                       break;
-                               }
+                       break;
+
+               case BPF_ALU | BPF_END | BPF_FROM_LE:
+                       break;
 
-                               switch (filter[i].code) {
-                               case BPF_S_JMP_JGT_X:
-                               case BPF_S_JMP_JGE_X:
-                               case BPF_S_JMP_JEQ_X:
-                                       seen |= SEEN_XREG;
-                                       EMIT2(0x39, 0xd8); /* cmp %ebx,%eax */
-                                       break;
-                               case BPF_S_JMP_JSET_X:
-                                       seen |= SEEN_XREG;
-                                       EMIT2(0x85, 0xd8); /* test %ebx,%eax */
-                                       break;
-                               case BPF_S_JMP_JEQ_K:
-                                       if (K == 0) {
-                                               EMIT2(0x85, 0xc0); /* test   %eax,%eax */
-                                               break;
-                                       }
-                               case BPF_S_JMP_JGT_K:
-                               case BPF_S_JMP_JGE_K:
-                                       if (K <= 127)
-                                               EMIT3(0x83, 0xf8, K); /* cmp imm8,%eax */
+                       /* ST: *(u8*)(a_reg + off) = imm */
+               case BPF_ST | BPF_MEM | BPF_B:
+                       if (is_ereg(a_reg))
+                               EMIT2(0x41, 0xC6);
+                       else
+                               EMIT1(0xC6);
+                       goto st;
+               case BPF_ST | BPF_MEM | BPF_H:
+                       if (is_ereg(a_reg))
+                               EMIT3(0x66, 0x41, 0xC7);
+                       else
+                               EMIT2(0x66, 0xC7);
+                       goto st;
+               case BPF_ST | BPF_MEM | BPF_W:
+                       if (is_ereg(a_reg))
+                               EMIT2(0x41, 0xC7);
+                       else
+                               EMIT1(0xC7);
+                       goto st;
+               case BPF_ST | BPF_MEM | BPF_DW:
+                       EMIT2(add_1mod(0x48, a_reg), 0xC7);
+
+st:                    if (is_imm8(insn->off))
+                               EMIT2(add_1reg(0x40, a_reg), insn->off);
+                       else
+                               EMIT1_off32(add_1reg(0x80, a_reg), insn->off);
+
+                       EMIT(K, bpf_size_to_x86_bytes(BPF_SIZE(insn->code)));
+                       break;
+
+                       /* STX: *(u8*)(a_reg + off) = x_reg */
+               case BPF_STX | BPF_MEM | BPF_B:
+                       /* emit 'mov byte ptr [rax + off], al' */
+                       if (is_ereg(a_reg) || is_ereg(x_reg) ||
+                           /* have to add extra byte for x86 SIL, DIL regs */
+                           x_reg == BPF_REG_1 || x_reg == BPF_REG_2)
+                               EMIT2(add_2mod(0x40, a_reg, x_reg), 0x88);
+                       else
+                               EMIT1(0x88);
+                       goto stx;
+               case BPF_STX | BPF_MEM | BPF_H:
+                       if (is_ereg(a_reg) || is_ereg(x_reg))
+                               EMIT3(0x66, add_2mod(0x40, a_reg, x_reg), 0x89);
+                       else
+                               EMIT2(0x66, 0x89);
+                       goto stx;
+               case BPF_STX | BPF_MEM | BPF_W:
+                       if (is_ereg(a_reg) || is_ereg(x_reg))
+                               EMIT2(add_2mod(0x40, a_reg, x_reg), 0x89);
+                       else
+                               EMIT1(0x89);
+                       goto stx;
+               case BPF_STX | BPF_MEM | BPF_DW:
+                       EMIT2(add_2mod(0x48, a_reg, x_reg), 0x89);
+stx:                   if (is_imm8(insn->off))
+                               EMIT2(add_2reg(0x40, a_reg, x_reg), insn->off);
+                       else
+                               EMIT1_off32(add_2reg(0x80, a_reg, x_reg),
+                                           insn->off);
+                       break;
+
+                       /* LDX: a_reg = *(u8*)(x_reg + off) */
+               case BPF_LDX | BPF_MEM | BPF_B:
+                       /* emit 'movzx rax, byte ptr [rax + off]' */
+                       EMIT3(add_2mod(0x48, x_reg, a_reg), 0x0F, 0xB6);
+                       goto ldx;
+               case BPF_LDX | BPF_MEM | BPF_H:
+                       /* emit 'movzx rax, word ptr [rax + off]' */
+                       EMIT3(add_2mod(0x48, x_reg, a_reg), 0x0F, 0xB7);
+                       goto ldx;
+               case BPF_LDX | BPF_MEM | BPF_W:
+                       /* emit 'mov eax, dword ptr [rax+0x14]' */
+                       if (is_ereg(a_reg) || is_ereg(x_reg))
+                               EMIT2(add_2mod(0x40, x_reg, a_reg), 0x8B);
+                       else
+                               EMIT1(0x8B);
+                       goto ldx;
+               case BPF_LDX | BPF_MEM | BPF_DW:
+                       /* emit 'mov rax, qword ptr [rax+0x14]' */
+                       EMIT2(add_2mod(0x48, x_reg, a_reg), 0x8B);
+ldx:                   /* if insn->off == 0 we can save one extra byte, but
+                        * special case of x86 r13 which always needs an offset
+                        * is not worth the hassle
+                        */
+                       if (is_imm8(insn->off))
+                               EMIT2(add_2reg(0x40, x_reg, a_reg), insn->off);
+                       else
+                               EMIT1_off32(add_2reg(0x80, x_reg, a_reg),
+                                           insn->off);
+                       break;
+
+                       /* STX XADD: lock *(u32*)(a_reg + off) += x_reg */
+               case BPF_STX | BPF_XADD | BPF_W:
+                       /* emit 'lock add dword ptr [rax + off], eax' */
+                       if (is_ereg(a_reg) || is_ereg(x_reg))
+                               EMIT3(0xF0, add_2mod(0x40, a_reg, x_reg), 0x01);
+                       else
+                               EMIT2(0xF0, 0x01);
+                       goto xadd;
+               case BPF_STX | BPF_XADD | BPF_DW:
+                       EMIT3(0xF0, add_2mod(0x48, a_reg, x_reg), 0x01);
+xadd:                  if (is_imm8(insn->off))
+                               EMIT2(add_2reg(0x40, a_reg, x_reg), insn->off);
+                       else
+                               EMIT1_off32(add_2reg(0x80, a_reg, x_reg),
+                                           insn->off);
+                       break;
+
+                       /* call */
+               case BPF_JMP | BPF_CALL:
+                       func = (u8 *) __bpf_call_base + K;
+                       jmp_offset = func - (image + addrs[i]);
+                       if (ctx->seen_ld_abs) {
+                               EMIT2(0x41, 0x52); /* push %r10 */
+                               EMIT2(0x41, 0x51); /* push %r9 */
+                               /* need to adjust jmp offset, since
+                                * pop %r9, pop %r10 take 4 bytes after call insn
+                                */
+                               jmp_offset += 4;
+                       }
+                       if (!K || !is_simm32(jmp_offset)) {
+                               pr_err("unsupported bpf func %d addr %p image %p\n",
+                                      K, func, image);
+                               return -EINVAL;
+                       }
+                       EMIT1_off32(0xE8, jmp_offset);
+                       if (ctx->seen_ld_abs) {
+                               EMIT2(0x41, 0x59); /* pop %r9 */
+                               EMIT2(0x41, 0x5A); /* pop %r10 */
+                       }
+                       break;
+
+                       /* cond jump */
+               case BPF_JMP | BPF_JEQ | BPF_X:
+               case BPF_JMP | BPF_JNE | BPF_X:
+               case BPF_JMP | BPF_JGT | BPF_X:
+               case BPF_JMP | BPF_JGE | BPF_X:
+               case BPF_JMP | BPF_JSGT | BPF_X:
+               case BPF_JMP | BPF_JSGE | BPF_X:
+                       /* cmp a_reg, x_reg */
+                       EMIT3(add_2mod(0x48, a_reg, x_reg), 0x39,
+                             add_2reg(0xC0, a_reg, x_reg));
+                       goto emit_cond_jmp;
+
+               case BPF_JMP | BPF_JSET | BPF_X:
+                       /* test a_reg, x_reg */
+                       EMIT3(add_2mod(0x48, a_reg, x_reg), 0x85,
+                             add_2reg(0xC0, a_reg, x_reg));
+                       goto emit_cond_jmp;
+
+               case BPF_JMP | BPF_JSET | BPF_K:
+                       /* test a_reg, imm32 */
+                       EMIT1(add_1mod(0x48, a_reg));
+                       EMIT2_off32(0xF7, add_1reg(0xC0, a_reg), K);
+                       goto emit_cond_jmp;
+
+               case BPF_JMP | BPF_JEQ | BPF_K:
+               case BPF_JMP | BPF_JNE | BPF_K:
+               case BPF_JMP | BPF_JGT | BPF_K:
+               case BPF_JMP | BPF_JGE | BPF_K:
+               case BPF_JMP | BPF_JSGT | BPF_K:
+               case BPF_JMP | BPF_JSGE | BPF_K:
+                       /* cmp a_reg, imm8/32 */
+                       EMIT1(add_1mod(0x48, a_reg));
+
+                       if (is_imm8(K))
+                               EMIT3(0x83, add_1reg(0xF8, a_reg), K);
+                       else
+                               EMIT2_off32(0x81, add_1reg(0xF8, a_reg), K);
+
+emit_cond_jmp:         /* convert BPF opcode to x86 */
+                       switch (BPF_OP(insn->code)) {
+                       case BPF_JEQ:
+                               jmp_cond = X86_JE;
+                               break;
+                       case BPF_JSET:
+                       case BPF_JNE:
+                               jmp_cond = X86_JNE;
+                               break;
+                       case BPF_JGT:
+                               /* GT is unsigned '>', JA in x86 */
+                               jmp_cond = X86_JA;
+                               break;
+                       case BPF_JGE:
+                               /* GE is unsigned '>=', JAE in x86 */
+                               jmp_cond = X86_JAE;
+                               break;
+                       case BPF_JSGT:
+                               /* signed '>', GT in x86 */
+                               jmp_cond = X86_JG;
+                               break;
+                       case BPF_JSGE:
+                               /* signed '>=', GE in x86 */
+                               jmp_cond = X86_JGE;
+                               break;
+                       default: /* to silence gcc warning */
+                               return -EFAULT;
+                       }
+                       jmp_offset = addrs[i + insn->off] - addrs[i];
+                       if (is_imm8(jmp_offset)) {
+                               EMIT2(jmp_cond, jmp_offset);
+                       } else if (is_simm32(jmp_offset)) {
+                               EMIT2_off32(0x0F, jmp_cond + 0x10, jmp_offset);
+                       } else {
+                               pr_err("cond_jmp gen bug %llx\n", jmp_offset);
+                               return -EFAULT;
+                       }
+
+                       break;
+
+               case BPF_JMP | BPF_JA:
+                       jmp_offset = addrs[i + insn->off] - addrs[i];
+                       if (!jmp_offset)
+                               /* optimize out nop jumps */
+                               break;
+emit_jmp:
+                       if (is_imm8(jmp_offset)) {
+                               EMIT2(0xEB, jmp_offset);
+                       } else if (is_simm32(jmp_offset)) {
+                               EMIT1_off32(0xE9, jmp_offset);
+                       } else {
+                               pr_err("jmp gen bug %llx\n", jmp_offset);
+                               return -EFAULT;
+                       }
+                       break;
+
+               case BPF_LD | BPF_IND | BPF_W:
+                       func = sk_load_word;
+                       goto common_load;
+               case BPF_LD | BPF_ABS | BPF_W:
+                       func = CHOOSE_LOAD_FUNC(K, sk_load_word);
+common_load:           ctx->seen_ld_abs = true;
+                       jmp_offset = func - (image + addrs[i]);
+                       if (!func || !is_simm32(jmp_offset)) {
+                               pr_err("unsupported bpf func %d addr %p image %p\n",
+                                      K, func, image);
+                               return -EINVAL;
+                       }
+                       if (BPF_MODE(insn->code) == BPF_ABS) {
+                               /* mov %esi, imm32 */
+                               EMIT1_off32(0xBE, K);
+                       } else {
+                               /* mov %rsi, x_reg */
+                               EMIT_mov(BPF_REG_2, x_reg);
+                               if (K) {
+                                       if (is_imm8(K))
+                                               /* add %esi, imm8 */
+                                               EMIT3(0x83, 0xC6, K);
                                        else
-                                               EMIT1_off32(0x3d, K); /* cmp imm32,%eax */
-                                       break;
-                               case BPF_S_JMP_JSET_K:
-                                       if (K <= 0xFF)
-                                               EMIT2(0xa8, K); /* test imm8,%al */
-                                       else if (!(K & 0xFFFF00FF))
-                                               EMIT3(0xf6, 0xc4, K >> 8); /* test imm8,%ah */
-                                       else if (K <= 0xFFFF) {
-                                               EMIT2(0x66, 0xa9); /* test imm16,%ax */
-                                               EMIT(K, 2);
-                                       } else {
-                                               EMIT1_off32(0xa9, K); /* test imm32,%eax */
-                                       }
-                                       break;
+                                               /* add %esi, imm32 */
+                                               EMIT2_off32(0x81, 0xC6, K);
                                }
-                               if (filter[i].jt != 0) {
-                                       if (filter[i].jf && f_offset)
-                                               t_offset += is_near(f_offset) ? 2 : 5;
-                                       EMIT_COND_JMP(t_op, t_offset);
-                                       if (filter[i].jf)
-                                               EMIT_JMP(f_offset);
-                                       break;
-                               }
-                               EMIT_COND_JMP(f_op, f_offset);
-                               break;
-                       default:
-                               /* hmm, too complex filter, give up with jit compiler */
-                               goto out;
                        }
-                       ilen = prog - temp;
-                       if (image) {
-                               if (unlikely(proglen + ilen > oldproglen)) {
-                                       pr_err("bpb_jit_compile fatal error\n");
-                                       kfree(addrs);
-                                       module_free(NULL, header);
-                                       return;
-                               }
-                               memcpy(image + proglen, temp, ilen);
+                       /* skb pointer is in R6 (%rbx), it will be copied into
+                        * %rdi if skb_copy_bits() call is necessary.
+                        * sk_load_* helpers also use %r10 and %r9d.
+                        * See bpf_jit.S
+                        */
+                       EMIT1_off32(0xE8, jmp_offset); /* call */
+                       break;
+
+               case BPF_LD | BPF_IND | BPF_H:
+                       func = sk_load_half;
+                       goto common_load;
+               case BPF_LD | BPF_ABS | BPF_H:
+                       func = CHOOSE_LOAD_FUNC(K, sk_load_half);
+                       goto common_load;
+               case BPF_LD | BPF_IND | BPF_B:
+                       func = sk_load_byte;
+                       goto common_load;
+               case BPF_LD | BPF_ABS | BPF_B:
+                       func = CHOOSE_LOAD_FUNC(K, sk_load_byte);
+                       goto common_load;
+
+               case BPF_JMP | BPF_EXIT:
+                       if (i != insn_cnt - 1) {
+                               jmp_offset = ctx->cleanup_addr - addrs[i];
+                               goto emit_jmp;
                        }
-                       proglen += ilen;
-                       addrs[i] = proglen;
-                       prog = temp;
+                       /* update cleanup_addr */
+                       ctx->cleanup_addr = proglen;
+                       /* mov rbx, qword ptr [rbp-X] */
+                       EMIT3_off32(0x48, 0x8B, 0x9D, -stacksize);
+                       /* mov r13, qword ptr [rbp-X] */
+                       EMIT3_off32(0x4C, 0x8B, 0xAD, -stacksize + 8);
+                       /* mov r14, qword ptr [rbp-X] */
+                       EMIT3_off32(0x4C, 0x8B, 0xB5, -stacksize + 16);
+                       /* mov r15, qword ptr [rbp-X] */
+                       EMIT3_off32(0x4C, 0x8B, 0xBD, -stacksize + 24);
+
+                       EMIT1(0xC9); /* leave */
+                       EMIT1(0xC3); /* ret */
+                       break;
+
+               default:
+                       /* By design x64 JIT should support all BPF instructions
+                        * This error will be seen if new instruction was added
+                        * to interpreter, but not to JIT
+                        * or if there is junk in sk_filter
+                        */
+                       pr_err("bpf_jit: unknown opcode %02x\n", insn->code);
+                       return -EINVAL;
                }
-               /* last bpf instruction is always a RET :
-                * use it to give the cleanup instruction(s) addr
-                */
-               cleanup_addr = proglen - 1; /* ret */
-               if (seen_or_pass0)
-                       cleanup_addr -= 1; /* leaveq */
-               if (seen_or_pass0 & SEEN_XREG)
-                       cleanup_addr -= 4; /* mov  -8(%rbp),%rbx */
 
+               ilen = prog - temp;
+               if (image) {
+                       if (unlikely(proglen + ilen > oldproglen)) {
+                               pr_err("bpf_jit_compile fatal error\n");
+                               return -EFAULT;
+                       }
+                       memcpy(image + proglen, temp, ilen);
+               }
+               proglen += ilen;
+               addrs[i] = proglen;
+               prog = temp;
+       }
+       return proglen;
+}
+
+void bpf_jit_compile(struct sk_filter *prog)
+{
+}
+
+void bpf_int_jit_compile(struct sk_filter *prog)
+{
+       struct bpf_binary_header *header = NULL;
+       int proglen, oldproglen = 0;
+       struct jit_context ctx = {};
+       u8 *image = NULL;
+       int *addrs;
+       int pass;
+       int i;
+
+       if (!bpf_jit_enable)
+               return;
+
+       if (!prog || !prog->len)
+               return;
+
+       addrs = kmalloc(prog->len * sizeof(*addrs), GFP_KERNEL);
+       if (!addrs)
+               return;
+
+       /* Before first pass, make a rough estimation of addrs[]
+        * each bpf instruction is translated to less than 64 bytes
+        */
+       for (proglen = 0, i = 0; i < prog->len; i++) {
+               proglen += 64;
+               addrs[i] = proglen;
+       }
+       ctx.cleanup_addr = proglen;
+
+       for (pass = 0; pass < 10; pass++) {
+               proglen = do_jit(prog, addrs, image, oldproglen, &ctx);
+               if (proglen <= 0) {
+                       image = NULL;
+                       if (header)
+                               module_free(NULL, header);
+                       goto out;
+               }
                if (image) {
                        if (proglen != oldproglen)
-                               pr_err("bpb_jit_compile proglen=%u != oldproglen=%u\n", proglen, oldproglen);
+                               pr_err("bpf_jit: proglen=%d != oldproglen=%d\n",
+                                      proglen, oldproglen);
                        break;
                }
                if (proglen == oldproglen) {
@@ -766,17 +918,16 @@ cond_branch:                      f_offset = addrs[i + filter[i].jf] - addrs[i];
        }
 
        if (bpf_jit_enable > 1)
-               bpf_jit_dump(flen, proglen, pass, image);
+               bpf_jit_dump(prog->len, proglen, 0, image);
 
        if (image) {
                bpf_flush_icache(header, image + proglen);
                set_memory_ro((unsigned long)header, header->pages);
-               fp->bpf_func = (void *)image;
-               fp->jited = 1;
+               prog->bpf_func = (void *)image;
+               prog->jited = 1;
        }
 out:
        kfree(addrs);
-       return;
 }
 
 static void bpf_jit_free_deferred(struct work_struct *work)
index 2e263f367b139c30da34b9e346007e206132955e..9df017ab2285a015a111cc384a0f28d6e488bbf8 100644 (file)
@@ -9,12 +9,9 @@ SECTIONS
 #ifdef BUILD_VDSO32
 #include <asm/vdso32.h>
 
-       .hpet_sect : {
-               hpet_page = . - VDSO_OFFSET(VDSO_HPET_PAGE);
-       } :text :hpet_sect
+       hpet_page = . - VDSO_OFFSET(VDSO_HPET_PAGE);
 
-       .vvar_sect : {
-               vvar = . - VDSO_OFFSET(VDSO_VVAR_PAGE);
+       vvar = . - VDSO_OFFSET(VDSO_VVAR_PAGE);
 
        /* Place all vvars at the offsets in asm/vvar.h. */
 #define EMIT_VVAR(name, offset) vvar_ ## name = vvar + offset;
@@ -22,7 +19,6 @@ SECTIONS
 #include <asm/vvar.h>
 #undef __VVAR_KERNEL_LDS
 #undef EMIT_VVAR
-       } :text :vvar_sect
 #endif
        . = SIZEOF_HEADERS;
 
@@ -61,7 +57,12 @@ SECTIONS
         */
        . = ALIGN(0x100);
 
-       .text           : { *(.text*) }                 :text   =0x90909090
+       .text           : { *(.text*) }                 :text   =0x90909090,
+
+       /*
+        * The comma above works around a bug in gold:
+        * https://sourceware.org/bugzilla/show_bug.cgi?id=16804
+        */
 
        /DISCARD/ : {
                *(.discard)
@@ -84,8 +85,4 @@ PHDRS
        dynamic         PT_DYNAMIC      FLAGS(4);               /* PF_R */
        note            PT_NOTE         FLAGS(4);               /* PF_R */
        eh_frame_hdr    PT_GNU_EH_FRAME;
-#ifdef BUILD_VDSO32
-       vvar_sect       PT_NULL         FLAGS(4);               /* PF_R */
-       hpet_sect       PT_NULL         FLAGS(4);               /* PF_R */
-#endif
 }
index 02d6d29a63c13716168c0a68b4bdfe55c71ef9b0..3a617af60d465196bb894cebdc4042ccd4e4a92f 100644 (file)
@@ -14,6 +14,7 @@ config XTENSA
        select GENERIC_PCI_IOMAP
        select ARCH_WANT_IPC_PARSE_VERSION
        select ARCH_WANT_OPTIONAL_GPIOLIB
+       select BUILDTIME_EXTABLE_SORT
        select CLONE_BACKWARDS
        select IRQ_DOMAIN
        select HAVE_OPROFILE
@@ -189,6 +190,24 @@ config INITIALIZE_XTENSA_MMU_INSIDE_VMLINUX
 
          If in doubt, say Y.
 
+config HIGHMEM
+       bool "High Memory Support"
+       help
+         Linux can use the full amount of RAM in the system by
+         default. However, the default MMUv2 setup only maps the
+         lowermost 128 MB of memory linearly to the areas starting
+         at 0xd0000000 (cached) and 0xd8000000 (uncached).
+         When there are more than 128 MB memory in the system not
+         all of it can be "permanently mapped" by the kernel.
+         The physical memory that's not permanently mapped is called
+         "high memory".
+
+         If you are compiling a kernel which will never run on a
+         machine with more than 128 MB total physical RAM, answer
+         N here.
+
+         If unsure, say Y.
+
 endmenu
 
 config XTENSA_CALIBRATE_CCOUNT
@@ -224,7 +243,6 @@ choice
 
 config XTENSA_PLATFORM_ISS
        bool "ISS"
-       depends on TTY
        select XTENSA_CALIBRATE_CCOUNT
        select SERIAL_CONSOLE
        help
diff --git a/arch/xtensa/boot/dts/kc705.dts b/arch/xtensa/boot/dts/kc705.dts
new file mode 100644 (file)
index 0000000..742a347
--- /dev/null
@@ -0,0 +1,11 @@
+/dts-v1/;
+/include/ "xtfpga.dtsi"
+/include/ "xtfpga-flash-128m.dtsi"
+
+/ {
+       compatible = "cdns,xtensa-kc705";
+       memory@0 {
+               device_type = "memory";
+               reg = <0x00000000 0x08000000>;
+       };
+};
diff --git a/arch/xtensa/boot/dts/xtfpga-flash-128m.dtsi b/arch/xtensa/boot/dts/xtfpga-flash-128m.dtsi
new file mode 100644 (file)
index 0000000..d3a88e0
--- /dev/null
@@ -0,0 +1,28 @@
+/ {
+       soc {
+               flash: flash@00000000 {
+                       #address-cells = <1>;
+                       #size-cells = <1>;
+                       compatible = "cfi-flash";
+                       reg = <0x00000000 0x08000000>;
+                       bank-width = <2>;
+                       device-width = <2>;
+                       partition@0x0 {
+                               label = "data";
+                               reg = <0x00000000 0x06000000>;
+                       };
+                       partition@0x6000000 {
+                               label = "boot loader area";
+                               reg = <0x06000000 0x00800000>;
+                       };
+                       partition@0x6800000 {
+                               label = "kernel image";
+                               reg = <0x06800000 0x017e0000>;
+                       };
+                       partition@0x7fe0000 {
+                               label = "boot environment";
+                               reg = <0x07fe0000 0x00020000>;
+                       };
+               };
+        };
+};
index e5703c7beeb6dad04d2006929984b0fd8a7c3289..1d97203c18e7f787b5696b4468e4901c9b88fcbb 100644 (file)
@@ -1,26 +1,28 @@
 / {
-       flash: flash@f8000000 {
-               #address-cells = <1>;
-               #size-cells = <1>;
-               compatible = "cfi-flash";
-               reg = <0xf8000000 0x01000000>;
-               bank-width = <2>;
-               device-width = <2>;
-               partition@0x0 {
-                       label = "boot loader area";
-                       reg = <0x00000000 0x00400000>;
+       soc {
+               flash: flash@08000000 {
+                       #address-cells = <1>;
+                       #size-cells = <1>;
+                       compatible = "cfi-flash";
+                       reg = <0x08000000 0x01000000>;
+                       bank-width = <2>;
+                       device-width = <2>;
+                       partition@0x0 {
+                               label = "boot loader area";
+                               reg = <0x00000000 0x00400000>;
+                       };
+                       partition@0x400000 {
+                               label = "kernel image";
+                               reg = <0x00400000 0x00600000>;
+                       };
+                       partition@0xa00000 {
+                               label = "data";
+                               reg = <0x00a00000 0x005e0000>;
+                       };
+                       partition@0xfe0000 {
+                               label = "boot environment";
+                               reg = <0x00fe0000 0x00020000>;
+                       };
                };
-               partition@0x400000 {
-                       label = "kernel image";
-                       reg = <0x00400000 0x00600000>;
-               };
-               partition@0xa00000 {
-                       label = "data";
-                       reg = <0x00a00000 0x005e0000>;
-               };
-               partition@0xfe0000 {
-                       label = "boot environment";
-                       reg = <0x00fe0000 0x00020000>;
-               };
-        };
+       };
 };
index 6f9c10d6b689a9696d17296025a21a1167322fbc..d1c621ca8be10cba5565fa9ce1e7b82cabaef973 100644 (file)
@@ -1,18 +1,20 @@
 / {
-       flash: flash@f8000000 {
-               #address-cells = <1>;
-               #size-cells = <1>;
-               compatible = "cfi-flash";
-               reg = <0xf8000000 0x00400000>;
-               bank-width = <2>;
-               device-width = <2>;
-               partition@0x0 {
-                       label = "boot loader area";
-                       reg = <0x00000000 0x003f0000>;
+       soc {
+               flash: flash@08000000 {
+                       #address-cells = <1>;
+                       #size-cells = <1>;
+                       compatible = "cfi-flash";
+                       reg = <0x08000000 0x00400000>;
+                       bank-width = <2>;
+                       device-width = <2>;
+                       partition@0x0 {
+                               label = "boot loader area";
+                               reg = <0x00000000 0x003f0000>;
+                       };
+                       partition@0x3f0000 {
+                               label = "boot environment";
+                               reg = <0x003f0000 0x00010000>;
+                       };
                };
-               partition@0x3f0000 {
-                       label = "boot environment";
-                       reg = <0x003f0000 0x00010000>;
-               };
-        };
+       };
 };
index e7370b11348e8d06c113d420704664740dbdec9b..dec9178840f695f0bcdd1de3cd5b17339fce8627 100644 (file)
                };
        };
 
-       serial0: serial@fd050020 {
-               device_type = "serial";
-               compatible = "ns16550a";
-               no-loopback-test;
-               reg = <0xfd050020 0x20>;
-               reg-shift = <2>;
-               interrupts = <0 1>; /* external irq 0 */
-               clocks = <&osc>;
-       };
+       soc {
+               #address-cells = <1>;
+               #size-cells = <1>;
+               compatible = "simple-bus";
+               ranges = <0x00000000 0xf0000000 0x10000000>;
 
-       enet0: ethoc@fd030000 {
-               compatible = "opencores,ethoc";
-               reg = <0xfd030000 0x4000 0xfd800000 0x4000>;
-               interrupts = <1 1>; /* external irq 1 */
-               local-mac-address = [00 50 c2 13 6f 00];
-               clocks = <&osc>;
+               serial0: serial@0d050020 {
+                       device_type = "serial";
+                       compatible = "ns16550a";
+                       no-loopback-test;
+                       reg = <0x0d050020 0x20>;
+                       reg-shift = <2>;
+                       interrupts = <0 1>; /* external irq 0 */
+                       clocks = <&osc>;
+               };
+
+               enet0: ethoc@0d030000 {
+                       compatible = "opencores,ethoc";
+                       reg = <0x0d030000 0x4000 0x0d800000 0x4000>;
+                       interrupts = <1 1>; /* external irq 1 */
+                       local-mac-address = [00 50 c2 13 6f 00];
+                       clocks = <&osc>;
+               };
        };
 };
index 23392c5630ce9939b04bae3ff6de4fd67417852a..892aab399ac873c885953e24430a3bc741aca6ed 100644 (file)
@@ -37,23 +37,14 @@ typedef struct bp_tag {
        unsigned long data[0];  /* data */
 } bp_tag_t;
 
-typedef struct meminfo {
+struct bp_meminfo {
        unsigned long type;
        unsigned long start;
        unsigned long end;
-} meminfo_t;
-
-#define SYSMEM_BANKS_MAX 5
+};
 
 #define MEMORY_TYPE_CONVENTIONAL       0x1000
 #define MEMORY_TYPE_NONE               0x2000
 
-typedef struct sysmem_info {
-       int nr_banks;
-       meminfo_t bank[SYSMEM_BANKS_MAX];
-} sysmem_info_t;
-
-extern sysmem_info_t sysmem;
-
 #endif
 #endif
diff --git a/arch/xtensa/include/asm/fixmap.h b/arch/xtensa/include/asm/fixmap.h
new file mode 100644 (file)
index 0000000..9f6c33d
--- /dev/null
@@ -0,0 +1,58 @@
+/*
+ * fixmap.h: compile-time virtual memory allocation
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License.  See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Copyright (C) 1998 Ingo Molnar
+ *
+ * Support of BIGMEM added by Gerhard Wichert, Siemens AG, July 1999
+ */
+
+#ifndef _ASM_FIXMAP_H
+#define _ASM_FIXMAP_H
+
+#include <asm/pgtable.h>
+#ifdef CONFIG_HIGHMEM
+#include <linux/threads.h>
+#include <asm/kmap_types.h>
+#endif
+
+/*
+ * Here we define all the compile-time 'special' virtual
+ * addresses. The point is to have a constant address at
+ * compile time, but to set the physical address only
+ * in the boot process. We allocate these special  addresses
+ * from the end of the consistent memory region backwards.
+ * Also this lets us do fail-safe vmalloc(), we
+ * can guarantee that these special addresses and
+ * vmalloc()-ed addresses never overlap.
+ *
+ * these 'compile-time allocated' memory buffers are
+ * fixed-size 4k pages. (or larger if used with an increment
+ * higher than 1) use fixmap_set(idx,phys) to associate
+ * physical memory with fixmap indices.
+ */
+enum fixed_addresses {
+#ifdef CONFIG_HIGHMEM
+       /* reserved pte's for temporary kernel mappings */
+       FIX_KMAP_BEGIN,
+       FIX_KMAP_END = FIX_KMAP_BEGIN + (KM_TYPE_NR * NR_CPUS) - 1,
+#endif
+       __end_of_fixed_addresses
+};
+
+#define FIXADDR_TOP     (VMALLOC_START - PAGE_SIZE)
+#define FIXADDR_SIZE   (__end_of_fixed_addresses << PAGE_SHIFT)
+#define FIXADDR_START  ((FIXADDR_TOP - FIXADDR_SIZE) & PMD_MASK)
+
+#include <asm-generic/fixmap.h>
+
+#define kmap_get_fixmap_pte(vaddr) \
+       pte_offset_kernel( \
+               pmd_offset(pud_offset(pgd_offset_k(vaddr), (vaddr)), (vaddr)), \
+               (vaddr) \
+       )
+
+#endif
index 80be15124697d8e85a23859a466e97f178936010..2653ef5d55f1c9ed92d35d50f91136732334ec1b 100644 (file)
@@ -6,11 +6,54 @@
  * this archive for more details.
  *
  * Copyright (C) 2003 - 2005 Tensilica Inc.
+ * Copyright (C) 2014 Cadence Design Systems Inc.
  */
 
 #ifndef _XTENSA_HIGHMEM_H
 #define _XTENSA_HIGHMEM_H
 
-extern void flush_cache_kmaps(void);
+#include <asm/cacheflush.h>
+#include <asm/fixmap.h>
+#include <asm/kmap_types.h>
+#include <asm/pgtable.h>
+
+#define PKMAP_BASE             (FIXADDR_START - PMD_SIZE)
+#define LAST_PKMAP             PTRS_PER_PTE
+#define LAST_PKMAP_MASK                (LAST_PKMAP - 1)
+#define PKMAP_NR(virt)         (((virt) - PKMAP_BASE) >> PAGE_SHIFT)
+#define PKMAP_ADDR(nr)         (PKMAP_BASE + ((nr) << PAGE_SHIFT))
+
+#define kmap_prot              PAGE_KERNEL
+
+extern pte_t *pkmap_page_table;
+
+void *kmap_high(struct page *page);
+void kunmap_high(struct page *page);
+
+static inline void *kmap(struct page *page)
+{
+       BUG_ON(in_interrupt());
+       if (!PageHighMem(page))
+               return page_address(page);
+       return kmap_high(page);
+}
+
+static inline void kunmap(struct page *page)
+{
+       BUG_ON(in_interrupt());
+       if (!PageHighMem(page))
+               return;
+       kunmap_high(page);
+}
+
+static inline void flush_cache_kmaps(void)
+{
+       flush_cache_all();
+}
+
+void *kmap_atomic(struct page *page);
+void __kunmap_atomic(void *kvaddr);
+
+void kmap_init(void);
 
 #endif
index 216446295ada686ccb4b454319b2b45e85d96720..4b0ca35a93b1a731bf0ce2c1db32f9fabb890fef 100644 (file)
@@ -310,6 +310,10 @@ set_pte_at(struct mm_struct *mm, unsigned long addr, pte_t *ptep, pte_t pteval)
        update_pte(ptep, pteval);
 }
 
+static inline void set_pte(pte_t *ptep, pte_t pteval)
+{
+       update_pte(ptep, pteval);
+}
 
 static inline void
 set_pmd(pmd_t *pmdp, pmd_t pmdval)
diff --git a/arch/xtensa/include/asm/sysmem.h b/arch/xtensa/include/asm/sysmem.h
new file mode 100644 (file)
index 0000000..c015c5c
--- /dev/null
@@ -0,0 +1,38 @@
+/*
+ * sysmem-related prototypes.
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License.  See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Copyright (C) 2014 Cadence Design Systems Inc.
+ */
+
+#ifndef _XTENSA_SYSMEM_H
+#define _XTENSA_SYSMEM_H
+
+#define SYSMEM_BANKS_MAX 31
+
+struct meminfo {
+       unsigned long start;
+       unsigned long end;
+};
+
+/*
+ * Bank array is sorted by .start.
+ * Banks don't overlap and there's at least one page gap
+ * between adjacent bank entries.
+ */
+struct sysmem_info {
+       int nr_banks;
+       struct meminfo bank[SYSMEM_BANKS_MAX];
+};
+
+extern struct sysmem_info sysmem;
+
+int add_sysmem_bank(unsigned long start, unsigned long end);
+int mem_reserve(unsigned long, unsigned long, int);
+void bootmem_init(void);
+void zones_init(void);
+
+#endif /* _XTENSA_SYSMEM_H */
index fc34274ce41bc81b3ddaa167fe887179f04c8ad7..06875feb27c28ebb870820706dc286cd9740f1ce 100644 (file)
@@ -36,6 +36,7 @@ void local_flush_tlb_page(struct vm_area_struct *vma,
                unsigned long page);
 void local_flush_tlb_range(struct vm_area_struct *vma,
                unsigned long start, unsigned long end);
+void local_flush_tlb_kernel_range(unsigned long start, unsigned long end);
 
 #ifdef CONFIG_SMP
 
@@ -44,12 +45,7 @@ void flush_tlb_mm(struct mm_struct *);
 void flush_tlb_page(struct vm_area_struct *, unsigned long);
 void flush_tlb_range(struct vm_area_struct *, unsigned long,
                unsigned long);
-
-static inline void flush_tlb_kernel_range(unsigned long start,
-               unsigned long end)
-{
-       flush_tlb_all();
-}
+void flush_tlb_kernel_range(unsigned long start, unsigned long end);
 
 #else /* !CONFIG_SMP */
 
@@ -58,7 +54,8 @@ static inline void flush_tlb_kernel_range(unsigned long start,
 #define flush_tlb_page(vma, page)         local_flush_tlb_page(vma, page)
 #define flush_tlb_range(vma, vmaddr, end)  local_flush_tlb_range(vma, vmaddr, \
                                                                 end)
-#define flush_tlb_kernel_range(start, end) local_flush_tlb_all()
+#define flush_tlb_kernel_range(start, end) local_flush_tlb_kernel_range(start, \
+                                                                       end)
 
 #endif /* CONFIG_SMP */
 
index 84fe931bb60e1f012417d202d002813b12cf68aa..9757bb74e53296f66372dd08506d94163e8801cc 100644 (file)
@@ -50,6 +50,7 @@
 #include <asm/param.h>
 #include <asm/traps.h>
 #include <asm/smp.h>
+#include <asm/sysmem.h>
 
 #include <platform/hardware.h>
 
@@ -88,12 +89,6 @@ static char __initdata command_line[COMMAND_LINE_SIZE];
 static char default_command_line[COMMAND_LINE_SIZE] __initdata = CONFIG_CMDLINE;
 #endif
 
-sysmem_info_t __initdata sysmem;
-
-extern int mem_reserve(unsigned long, unsigned long, int);
-extern void bootmem_init(void);
-extern void zones_init(void);
-
 /*
  * Boot parameter parsing.
  *
@@ -113,31 +108,14 @@ typedef struct tagtable {
 
 /* parse current tag */
 
-static int __init add_sysmem_bank(unsigned long type, unsigned long start,
-               unsigned long end)
-{
-       if (sysmem.nr_banks >= SYSMEM_BANKS_MAX) {
-               printk(KERN_WARNING
-                               "Ignoring memory bank 0x%08lx size %ldKB\n",
-                               start, end - start);
-               return -EINVAL;
-       }
-       sysmem.bank[sysmem.nr_banks].type  = type;
-       sysmem.bank[sysmem.nr_banks].start = PAGE_ALIGN(start);
-       sysmem.bank[sysmem.nr_banks].end   = end & PAGE_MASK;
-       sysmem.nr_banks++;
-
-       return 0;
-}
-
 static int __init parse_tag_mem(const bp_tag_t *tag)
 {
-       meminfo_t *mi = (meminfo_t *)(tag->data);
+       struct bp_meminfo *mi = (struct bp_meminfo *)(tag->data);
 
        if (mi->type != MEMORY_TYPE_CONVENTIONAL)
                return -1;
 
-       return add_sysmem_bank(mi->type, mi->start, mi->end);
+       return add_sysmem_bank(mi->start, mi->end);
 }
 
 __tagtable(BP_TAG_MEMORY, parse_tag_mem);
@@ -146,8 +124,8 @@ __tagtable(BP_TAG_MEMORY, parse_tag_mem);
 
 static int __init parse_tag_initrd(const bp_tag_t* tag)
 {
-       meminfo_t* mi;
-       mi = (meminfo_t*)(tag->data);
+       struct bp_meminfo *mi = (struct bp_meminfo *)(tag->data);
+
        initrd_start = (unsigned long)__va(mi->start);
        initrd_end = (unsigned long)__va(mi->end);
 
@@ -255,7 +233,7 @@ void __init early_init_dt_add_memory_arch(u64 base, u64 size)
                return;
 
        size &= PAGE_MASK;
-       add_sysmem_bank(MEMORY_TYPE_CONVENTIONAL, base, base + size);
+       add_sysmem_bank(base, base + size);
 }
 
 void * __init early_init_dt_alloc_memory_arch(u64 size, u64 align)
@@ -292,8 +270,6 @@ device_initcall(xtensa_device_probe);
 
 void __init init_arch(bp_tag_t *bp_start)
 {
-       sysmem.nr_banks = 0;
-
        /* Parse boot parameters */
 
        if (bp_start)
@@ -304,10 +280,9 @@ void __init init_arch(bp_tag_t *bp_start)
 #endif
 
        if (sysmem.nr_banks == 0) {
-               sysmem.nr_banks = 1;
-               sysmem.bank[0].start = PLATFORM_DEFAULT_MEM_START;
-               sysmem.bank[0].end = PLATFORM_DEFAULT_MEM_START
-                                    + PLATFORM_DEFAULT_MEM_SIZE;
+               add_sysmem_bank(PLATFORM_DEFAULT_MEM_START,
+                               PLATFORM_DEFAULT_MEM_START +
+                               PLATFORM_DEFAULT_MEM_SIZE);
        }
 
 #ifdef CONFIG_CMDLINE_BOOL
@@ -487,7 +462,7 @@ void __init setup_arch(char **cmdline_p)
 #ifdef CONFIG_BLK_DEV_INITRD
        if (initrd_start < initrd_end) {
                initrd_is_mapped = mem_reserve(__pa(initrd_start),
-                                              __pa(initrd_end), 0);
+                                              __pa(initrd_end), 0) == 0;
                initrd_below_start_ok = 1;
        } else {
                initrd_start = 0;
@@ -532,6 +507,7 @@ void __init setup_arch(char **cmdline_p)
                    __pa(&_Level6InterruptVector_text_end), 0);
 #endif
 
+       parse_early_param();
        bootmem_init();
 
        unflatten_and_copy_device_tree();
index aa8bd8717927185bd5b422316885ddaa98d889f7..40b5a3771fb063fb02ffaa7fe07a426a3a684677 100644 (file)
@@ -496,6 +496,21 @@ void flush_tlb_range(struct vm_area_struct *vma,
        on_each_cpu(ipi_flush_tlb_range, &fd, 1);
 }
 
+static void ipi_flush_tlb_kernel_range(void *arg)
+{
+       struct flush_data *fd = arg;
+       local_flush_tlb_kernel_range(fd->addr1, fd->addr2);
+}
+
+void flush_tlb_kernel_range(unsigned long start, unsigned long end)
+{
+       struct flush_data fd = {
+               .addr1 = start,
+               .addr2 = end,
+       };
+       on_each_cpu(ipi_flush_tlb_kernel_range, &fd, 1);
+}
+
 /* Cache flush functions */
 
 static void ipi_flush_cache_all(void *arg)
index 80b33ed51f31174fd41a53bebd957c140517d8f9..4d2872fd9bb5ebf89bb15127841e5ae28e8d9b58 100644 (file)
@@ -20,6 +20,7 @@
 #include <linux/in6.h>
 
 #include <asm/uaccess.h>
+#include <asm/cacheflush.h>
 #include <asm/checksum.h>
 #include <asm/dma.h>
 #include <asm/io.h>
@@ -105,6 +106,7 @@ EXPORT_SYMBOL(csum_partial_copy_generic);
  * Architecture-specific symbols
  */
 EXPORT_SYMBOL(__xtensa_copy_user);
+EXPORT_SYMBOL(__invalidate_icache_range);
 
 /*
  * Kernel hacking ...
@@ -127,3 +129,8 @@ EXPORT_SYMBOL(common_exception_return);
 #ifdef CONFIG_FUNCTION_TRACER
 EXPORT_SYMBOL(_mcount);
 #endif
+
+EXPORT_SYMBOL(__invalidate_dcache_range);
+#if XCHAL_DCACHE_IS_WRITEBACK
+EXPORT_SYMBOL(__flush_dcache_range);
+#endif
index f0b646d2f843feb5945601dbe65966a4a71cfec9..f54f78e24d7b5e72733f23da806272a4cba6b882 100644 (file)
@@ -4,3 +4,4 @@
 
 obj-y                  := init.o cache.o misc.o
 obj-$(CONFIG_MMU)      += fault.o mmu.o tlb.o
+obj-$(CONFIG_HIGHMEM)  += highmem.o
index ba4c47f291b17843047a410549b09cb59ba52967..63cbb867dadd64d8907176f1bd60420f8a41217a 100644 (file)
  *
  */
 
+#if (DCACHE_WAY_SIZE > PAGE_SIZE) && defined(CONFIG_HIGHMEM)
+#error "HIGHMEM is not supported on cores with aliasing cache."
+#endif
+
 #if (DCACHE_WAY_SIZE > PAGE_SIZE) && XCHAL_DCACHE_IS_WRITEBACK
 
 /*
@@ -179,10 +183,11 @@ update_mmu_cache(struct vm_area_struct * vma, unsigned long addr, pte_t *ptep)
 #else
        if (!PageReserved(page) && !test_bit(PG_arch_1, &page->flags)
            && (vma->vm_flags & VM_EXEC) != 0) {
-               unsigned long paddr = (unsigned long) page_address(page);
+               unsigned long paddr = (unsigned long)kmap_atomic(page);
                __flush_dcache_page(paddr);
                __invalidate_icache_page(paddr);
                set_bit(PG_arch_1, &page->flags);
+               kunmap_atomic((void *)paddr);
        }
 #endif
 }
diff --git a/arch/xtensa/mm/highmem.c b/arch/xtensa/mm/highmem.c
new file mode 100644 (file)
index 0000000..17a8c0d
--- /dev/null
@@ -0,0 +1,72 @@
+/*
+ * High memory support for Xtensa architecture
+ *
+ * This file is subject to the terms and conditions of the GNU General
+ * Public License.  See the file "COPYING" in the main directory of
+ * this archive for more details.
+ *
+ * Copyright (C) 2014 Cadence Design Systems Inc.
+ */
+
+#include <linux/export.h>
+#include <linux/highmem.h>
+#include <asm/tlbflush.h>
+
+static pte_t *kmap_pte;
+
+void *kmap_atomic(struct page *page)
+{
+       enum fixed_addresses idx;
+       unsigned long vaddr;
+       int type;
+
+       pagefault_disable();
+       if (!PageHighMem(page))
+               return page_address(page);
+
+       type = kmap_atomic_idx_push();
+       idx = type + KM_TYPE_NR * smp_processor_id();
+       vaddr = __fix_to_virt(FIX_KMAP_BEGIN + idx);
+#ifdef CONFIG_DEBUG_HIGHMEM
+       BUG_ON(!pte_none(*(kmap_pte - idx)));
+#endif
+       set_pte(kmap_pte - idx, mk_pte(page, PAGE_KERNEL_EXEC));
+
+       return (void *)vaddr;
+}
+EXPORT_SYMBOL(kmap_atomic);
+
+void __kunmap_atomic(void *kvaddr)
+{
+       int idx, type;
+
+       if (kvaddr >= (void *)FIXADDR_START &&
+           kvaddr < (void *)FIXADDR_TOP) {
+               type = kmap_atomic_idx();
+               idx = type + KM_TYPE_NR * smp_processor_id();
+
+               /*
+                * Force other mappings to Oops if they'll try to access this
+                * pte without first remap it.  Keeping stale mappings around
+                * is a bad idea also, in case the page changes cacheability
+                * attributes or becomes a protected page in a hypervisor.
+                */
+               pte_clear(&init_mm, kvaddr, kmap_pte - idx);
+               local_flush_tlb_kernel_range((unsigned long)kvaddr,
+                                            (unsigned long)kvaddr + PAGE_SIZE);
+
+               kmap_atomic_idx_pop();
+       }
+
+       pagefault_enable();
+}
+EXPORT_SYMBOL(__kunmap_atomic);
+
+void __init kmap_init(void)
+{
+       unsigned long kmap_vstart;
+
+       /* cache the first kmap pte */
+       kmap_vstart = __fix_to_virt(FIX_KMAP_BEGIN);
+       kmap_pte = kmap_get_fixmap_pte(kmap_vstart);
+}
index aff108df92d3a301e8ba0ccaf7e13a5fb26c9038..4224256bb215f17c52d91662f186ecb250dee361 100644 (file)
@@ -8,6 +8,7 @@
  * for more details.
  *
  * Copyright (C) 2001 - 2005 Tensilica Inc.
+ * Copyright (C) 2014 Cadence Design Systems Inc.
  *
  * Chris Zankel        <chris@zankel.net>
  * Joe Taylor  <joe@tensilica.com, joetylr@yahoo.com>
@@ -19,6 +20,7 @@
 #include <linux/errno.h>
 #include <linux/bootmem.h>
 #include <linux/gfp.h>
+#include <linux/highmem.h>
 #include <linux/swap.h>
 #include <linux/mman.h>
 #include <linux/nodemask.h>
 #include <asm/bootparam.h>
 #include <asm/page.h>
 #include <asm/sections.h>
+#include <asm/sysmem.h>
+
+struct sysmem_info sysmem __initdata;
+
+static void __init sysmem_dump(void)
+{
+       unsigned i;
+
+       pr_debug("Sysmem:\n");
+       for (i = 0; i < sysmem.nr_banks; ++i)
+               pr_debug("  0x%08lx - 0x%08lx (%ldK)\n",
+                        sysmem.bank[i].start, sysmem.bank[i].end,
+                        (sysmem.bank[i].end - sysmem.bank[i].start) >> 10);
+}
+
+/*
+ * Find bank with maximal .start such that bank.start <= start
+ */
+static inline struct meminfo * __init find_bank(unsigned long start)
+{
+       unsigned i;
+       struct meminfo *it = NULL;
+
+       for (i = 0; i < sysmem.nr_banks; ++i)
+               if (sysmem.bank[i].start <= start)
+                       it = sysmem.bank + i;
+               else
+                       break;
+       return it;
+}
+
+/*
+ * Move all memory banks starting at 'from' to a new place at 'to',
+ * adjust nr_banks accordingly.
+ * Both 'from' and 'to' must be inside the sysmem.bank.
+ *
+ * Returns: 0 (success), -ENOMEM (not enough space in the sysmem.bank).
+ */
+static int __init move_banks(struct meminfo *to, struct meminfo *from)
+{
+       unsigned n = sysmem.nr_banks - (from - sysmem.bank);
+
+       if (to > from && to - from + sysmem.nr_banks > SYSMEM_BANKS_MAX)
+               return -ENOMEM;
+       if (to != from)
+               memmove(to, from, n * sizeof(struct meminfo));
+       sysmem.nr_banks += to - from;
+       return 0;
+}
+
+/*
+ * Add new bank to sysmem. Resulting sysmem is the union of bytes of the
+ * original sysmem and the new bank.
+ *
+ * Returns: 0 (success), < 0 (error)
+ */
+int __init add_sysmem_bank(unsigned long start, unsigned long end)
+{
+       unsigned i;
+       struct meminfo *it = NULL;
+       unsigned long sz;
+       unsigned long bank_sz = 0;
+
+       if (start == end ||
+           (start < end) != (PAGE_ALIGN(start) < (end & PAGE_MASK))) {
+               pr_warn("Ignoring small memory bank 0x%08lx size: %ld bytes\n",
+                       start, end - start);
+               return -EINVAL;
+       }
+
+       start = PAGE_ALIGN(start);
+       end &= PAGE_MASK;
+       sz = end - start;
+
+       it = find_bank(start);
+
+       if (it)
+               bank_sz = it->end - it->start;
+
+       if (it && bank_sz >= start - it->start) {
+               if (end - it->start > bank_sz)
+                       it->end = end;
+               else
+                       return 0;
+       } else {
+               if (!it)
+                       it = sysmem.bank;
+               else
+                       ++it;
+
+               if (it - sysmem.bank < sysmem.nr_banks &&
+                   it->start - start <= sz) {
+                       it->start = start;
+                       if (it->end - it->start < sz)
+                               it->end = end;
+                       else
+                               return 0;
+               } else {
+                       if (move_banks(it + 1, it) < 0) {
+                               pr_warn("Ignoring memory bank 0x%08lx size %ld bytes\n",
+                                       start, end - start);
+                               return -EINVAL;
+                       }
+                       it->start = start;
+                       it->end = end;
+                       return 0;
+               }
+       }
+       sz = it->end - it->start;
+       for (i = it + 1 - sysmem.bank; i < sysmem.nr_banks; ++i)
+               if (sysmem.bank[i].start - it->start <= sz) {
+                       if (sz < sysmem.bank[i].end - it->start)
+                               it->end = sysmem.bank[i].end;
+               } else {
+                       break;
+               }
+
+       move_banks(it + 1, sysmem.bank + i);
+       return 0;
+}
 
 /*
  * mem_reserve(start, end, must_exist)
  *
  * Reserve some memory from the memory pool.
+ * If must_exist is set and a part of the region being reserved does not exist
+ * memory map is not altered.
  *
  * Parameters:
  *  start      Start of region,
  *  must_exist Must exist in memory pool.
  *
  * Returns:
- *  0 (memory area couldn't be mapped)
- * -1 (success)
+ *  0 (success)
+ *  < 0 (error)
  */
 
 int __init mem_reserve(unsigned long start, unsigned long end, int must_exist)
 {
-       int i;
-
-       if (start == end)
-               return 0;
+       struct meminfo *it;
+       struct meminfo *rm = NULL;
+       unsigned long sz;
+       unsigned long bank_sz = 0;
 
        start = start & PAGE_MASK;
        end = PAGE_ALIGN(end);
+       sz = end - start;
+       if (!sz)
+               return -EINVAL;
 
-       for (i = 0; i < sysmem.nr_banks; i++)
-               if (start < sysmem.bank[i].end
-                   && end >= sysmem.bank[i].start)
-                       break;
+       it = find_bank(start);
+
+       if (it)
+               bank_sz = it->end - it->start;
 
-       if (i == sysmem.nr_banks) {
-               if (must_exist)
-                       printk (KERN_WARNING "mem_reserve: [0x%0lx, 0x%0lx) "
-                               "not in any region!\n", start, end);
-               return 0;
+       if ((!it || end - it->start > bank_sz) && must_exist) {
+               pr_warn("mem_reserve: [0x%0lx, 0x%0lx) not in any region!\n",
+                       start, end);
+               return -EINVAL;
        }
 
-       if (start > sysmem.bank[i].start) {
-               if (end < sysmem.bank[i].end) {
-                       /* split entry */
-                       if (sysmem.nr_banks >= SYSMEM_BANKS_MAX)
-                               panic("meminfo overflow\n");
-                       sysmem.bank[sysmem.nr_banks].start = end;
-                       sysmem.bank[sysmem.nr_banks].end = sysmem.bank[i].end;
-                       sysmem.nr_banks++;
+       if (it && start - it->start < bank_sz) {
+               if (start == it->start) {
+                       if (end - it->start < bank_sz) {
+                               it->start = end;
+                               return 0;
+                       } else {
+                               rm = it;
+                       }
+               } else {
+                       it->end = start;
+                       if (end - it->start < bank_sz)
+                               return add_sysmem_bank(end,
+                                                      it->start + bank_sz);
+                       ++it;
                }
-               sysmem.bank[i].end = start;
+       }
 
-       } else if (end < sysmem.bank[i].end) {
-               sysmem.bank[i].start = end;
+       if (!it)
+               it = sysmem.bank;
 
-       } else {
-               /* remove entry */
-               sysmem.nr_banks--;
-               sysmem.bank[i].start = sysmem.bank[sysmem.nr_banks].start;
-               sysmem.bank[i].end   = sysmem.bank[sysmem.nr_banks].end;
+       for (; it < sysmem.bank + sysmem.nr_banks; ++it) {
+               if (it->end - start <= sz) {
+                       if (!rm)
+                               rm = it;
+               } else {
+                       if (it->start - start < sz)
+                               it->start = end;
+                       break;
+               }
        }
-       return -1;
+
+       if (rm)
+               move_banks(rm, it);
+
+       return 0;
 }
 
 
@@ -99,6 +239,7 @@ void __init bootmem_init(void)
        unsigned long bootmap_start, bootmap_size;
        int i;
 
+       sysmem_dump();
        max_low_pfn = max_pfn = 0;
        min_low_pfn = ~0;
 
@@ -156,19 +297,13 @@ void __init bootmem_init(void)
 
 void __init zones_init(void)
 {
-       unsigned long zones_size[MAX_NR_ZONES];
-       int i;
-
        /* All pages are DMA-able, so we put them all in the DMA zone. */
-
-       zones_size[ZONE_DMA] = max_low_pfn - ARCH_PFN_OFFSET;
-       for (i = 1; i < MAX_NR_ZONES; i++)
-               zones_size[i] = 0;
-
+       unsigned long zones_size[MAX_NR_ZONES] = {
+               [ZONE_DMA] = max_low_pfn - ARCH_PFN_OFFSET,
 #ifdef CONFIG_HIGHMEM
-       zones_size[ZONE_HIGHMEM] = max_pfn - max_low_pfn;
+               [ZONE_HIGHMEM] = max_pfn - max_low_pfn,
 #endif
-
+       };
        free_area_init_node(0, zones_size, ARCH_PFN_OFFSET, NULL);
 }
 
@@ -178,16 +313,38 @@ void __init zones_init(void)
 
 void __init mem_init(void)
 {
-       max_mapnr = max_low_pfn - ARCH_PFN_OFFSET;
-       high_memory = (void *) __va(max_low_pfn << PAGE_SHIFT);
-
 #ifdef CONFIG_HIGHMEM
-#error HIGHGMEM not implemented in init.c
+       unsigned long tmp;
+
+       reset_all_zones_managed_pages();
+       for (tmp = max_low_pfn; tmp < max_pfn; tmp++)
+               free_highmem_page(pfn_to_page(tmp));
 #endif
 
+       max_mapnr = max_pfn - ARCH_PFN_OFFSET;
+       high_memory = (void *)__va(max_low_pfn << PAGE_SHIFT);
+
        free_all_bootmem();
 
        mem_init_print_info(NULL);
+       pr_info("virtual kernel memory layout:\n"
+#ifdef CONFIG_HIGHMEM
+               "    pkmap   : 0x%08lx - 0x%08lx  (%5lu kB)\n"
+               "    fixmap  : 0x%08lx - 0x%08lx  (%5lu kB)\n"
+#endif
+               "    vmalloc : 0x%08x - 0x%08x  (%5u MB)\n"
+               "    lowmem  : 0x%08x - 0x%08lx  (%5lu MB)\n",
+#ifdef CONFIG_HIGHMEM
+               PKMAP_BASE, PKMAP_BASE + LAST_PKMAP * PAGE_SIZE,
+               (LAST_PKMAP*PAGE_SIZE) >> 10,
+               FIXADDR_START, FIXADDR_TOP,
+               (FIXADDR_TOP - FIXADDR_START) >> 10,
+#endif
+               VMALLOC_START, VMALLOC_END,
+               (VMALLOC_END - VMALLOC_START) >> 20,
+               PAGE_OFFSET, PAGE_OFFSET +
+               (max_low_pfn - min_low_pfn) * PAGE_SIZE,
+               ((max_low_pfn - min_low_pfn) * PAGE_SIZE) >> 20);
 }
 
 #ifdef CONFIG_BLK_DEV_INITRD
@@ -204,3 +361,53 @@ void free_initmem(void)
 {
        free_initmem_default(-1);
 }
+
+static void __init parse_memmap_one(char *p)
+{
+       char *oldp;
+       unsigned long start_at, mem_size;
+
+       if (!p)
+               return;
+
+       oldp = p;
+       mem_size = memparse(p, &p);
+       if (p == oldp)
+               return;
+
+       switch (*p) {
+       case '@':
+               start_at = memparse(p + 1, &p);
+               add_sysmem_bank(start_at, start_at + mem_size);
+               break;
+
+       case '$':
+               start_at = memparse(p + 1, &p);
+               mem_reserve(start_at, start_at + mem_size, 0);
+               break;
+
+       case 0:
+               mem_reserve(mem_size, 0, 0);
+               break;
+
+       default:
+               pr_warn("Unrecognized memmap syntax: %s\n", p);
+               break;
+       }
+}
+
+static int __init parse_memmap_opt(char *str)
+{
+       while (str) {
+               char *k = strchr(str, ',');
+
+               if (k)
+                       *k++ = 0;
+
+               parse_memmap_one(str);
+               str = k;
+       }
+
+       return 0;
+}
+early_param("memmap", parse_memmap_opt);
index 861203e958da828deb140122752e95b47ddbf35f..3429b483d9f85cd2495e01c8c0a11d05bc22e16c 100644 (file)
@@ -3,6 +3,7 @@
  *
  * Extracted from init.c
  */
+#include <linux/bootmem.h>
 #include <linux/percpu.h>
 #include <linux/init.h>
 #include <linux/string.h>
 #include <asm/initialize_mmu.h>
 #include <asm/io.h>
 
+#if defined(CONFIG_HIGHMEM)
+static void * __init init_pmd(unsigned long vaddr)
+{
+       pgd_t *pgd = pgd_offset_k(vaddr);
+       pmd_t *pmd = pmd_offset(pgd, vaddr);
+
+       if (pmd_none(*pmd)) {
+               unsigned i;
+               pte_t *pte = alloc_bootmem_low_pages(PAGE_SIZE);
+
+               for (i = 0; i < 1024; i++)
+                       pte_clear(NULL, 0, pte + i);
+
+               set_pmd(pmd, __pmd(((unsigned long)pte) & PAGE_MASK));
+               BUG_ON(pte != pte_offset_kernel(pmd, 0));
+               pr_debug("%s: vaddr: 0x%08lx, pmd: 0x%p, pte: 0x%p\n",
+                        __func__, vaddr, pmd, pte);
+               return pte;
+       } else {
+               return pte_offset_kernel(pmd, 0);
+       }
+}
+
+static void __init fixedrange_init(void)
+{
+       BUILD_BUG_ON(FIXADDR_SIZE > PMD_SIZE);
+       init_pmd(__fix_to_virt(__end_of_fixed_addresses - 1) & PMD_MASK);
+}
+#endif
+
 void __init paging_init(void)
 {
        memset(swapper_pg_dir, 0, PAGE_SIZE);
+#ifdef CONFIG_HIGHMEM
+       fixedrange_init();
+       pkmap_page_table = init_pmd(PKMAP_BASE);
+       kmap_init();
+#endif
 }
 
 /*
index ade623826788b387f150cfae5a90821f07f6b028..5ece856c5725c7cc72d0a0175bf9229330fabec5 100644 (file)
@@ -149,6 +149,21 @@ void local_flush_tlb_page(struct vm_area_struct *vma, unsigned long page)
        local_irq_restore(flags);
 }
 
+void local_flush_tlb_kernel_range(unsigned long start, unsigned long end)
+{
+       if (end > start && start >= TASK_SIZE && end <= PAGE_OFFSET &&
+           end - start < _TLB_ENTRIES << PAGE_SHIFT) {
+               start &= PAGE_MASK;
+               while (start < end) {
+                       invalidate_itlb_mapping(start);
+                       invalidate_dtlb_mapping(start);
+                       start += PAGE_SIZE;
+               }
+       } else {
+               local_flush_tlb_all();
+       }
+}
+
 #ifdef CONFIG_DEBUG_TLB_SANITY
 
 static unsigned get_pte_for_vaddr(unsigned vaddr)
index d2369b799c5077f7b9240135cba96d74acfcc7cf..b3e89291cfbafcb35a1eb07f7f584c35ef7f2d81 100644 (file)
@@ -4,6 +4,7 @@
 # "prom monitor" library routines under Linux.
 #
 
-obj-y                  = console.o setup.o
+obj-y                  = setup.o
+obj-$(CONFIG_TTY)      += console.o
 obj-$(CONFIG_NET)      += network.o
 obj-$(CONFIG_BLK_DEV_SIMDISK) += simdisk.o
index f9bc8796629089a540c892109a46a9fe19f68568..b90555cb80890135fee12f7cc3ec361127106836 100644 (file)
@@ -92,18 +92,8 @@ void __init platform_setup(char** cmdline)
 
 /* early initialization */
 
-extern sysmem_info_t __initdata sysmem;
-
-void platform_init(bp_tag_t* first)
+void __init platform_init(bp_tag_t *first)
 {
-       /* Set default memory block if not provided by the bootloader. */
-
-       if (sysmem.nr_banks == 0) {
-               sysmem.nr_banks = 1;
-               sysmem.bank[0].start = PLATFORM_DEFAULT_MEM_START;
-               sysmem.bank[0].end = PLATFORM_DEFAULT_MEM_START
-                                    + PLATFORM_DEFAULT_MEM_SIZE;
-       }
 }
 
 /* Heartbeat. Let the LED blink. */
index 1512e41cd93d74a4e7ab3fde6809e64468f797a8..43665d0d0905ddddf018fe68655c1ff7685b0b9e 100644 (file)
@@ -466,7 +466,7 @@ static int crypto_user_rcv_msg(struct sk_buff *skb, struct nlmsghdr *nlh)
        type -= CRYPTO_MSG_BASE;
        link = &crypto_dispatch[type];
 
-       if (!capable(CAP_NET_ADMIN))
+       if (!netlink_capable(skb, CAP_NET_ADMIN))
                return -EPERM;
 
        if ((type == (CRYPTO_MSG_GETALG - CRYPTO_MSG_BASE) &&
index c29c2c3ec0ad8ffc2c6427393593dbf147899d1b..b06f5f55ada952ced85de9c845dfb49cac421633 100644 (file)
@@ -170,6 +170,9 @@ static int acpi_processor_hotadd_init(struct acpi_processor *pr)
        acpi_status status;
        int ret;
 
+       if (pr->apic_id == -1)
+               return -ENODEV;
+
        status = acpi_evaluate_integer(pr->handle, "_STA", NULL, &sta);
        if (ACPI_FAILURE(status) || !(sta & ACPI_STA_DEVICE_PRESENT))
                return -ENODEV;
@@ -260,10 +263,8 @@ static int acpi_processor_get_info(struct acpi_device *device)
        }
 
        apic_id = acpi_get_apicid(pr->handle, device_declaration, pr->acpi_id);
-       if (apic_id < 0) {
+       if (apic_id < 0)
                acpi_handle_debug(pr->handle, "failed to get CPU APIC ID.\n");
-               return -ENODEV;
-       }
        pr->apic_id = apic_id;
 
        cpu_index = acpi_map_cpuid(pr->apic_id, pr->acpi_id);
index 68d97441432cca3140d2151a50af3384b034464a..12878e1982f77d5f69fcad7d266f7ac922f2a12d 100644 (file)
 #include "accommon.h"
 #include "acdispat.h"
 #include "acinterp.h"
+#include "amlcode.h"
 
 #define _COMPONENT          ACPI_EXECUTER
 ACPI_MODULE_NAME("exfield")
 
+/* Local prototypes */
+static u32
+acpi_ex_get_serial_access_length(u32 accessor_type, u32 access_length);
+
+/*******************************************************************************
+ *
+ * FUNCTION:    acpi_get_serial_access_bytes
+ *
+ * PARAMETERS:  accessor_type   - The type of the protocol indicated by region
+ *                                field access attributes
+ *              access_length   - The access length of the region field
+ *
+ * RETURN:      Decoded access length
+ *
+ * DESCRIPTION: This routine returns the length of the generic_serial_bus
+ *              protocol bytes
+ *
+ ******************************************************************************/
+
+static u32
+acpi_ex_get_serial_access_length(u32 accessor_type, u32 access_length)
+{
+       u32 length;
+
+       switch (accessor_type) {
+       case AML_FIELD_ATTRIB_QUICK:
+
+               length = 0;
+               break;
+
+       case AML_FIELD_ATTRIB_SEND_RCV:
+       case AML_FIELD_ATTRIB_BYTE:
+
+               length = 1;
+               break;
+
+       case AML_FIELD_ATTRIB_WORD:
+       case AML_FIELD_ATTRIB_WORD_CALL:
+
+               length = 2;
+               break;
+
+       case AML_FIELD_ATTRIB_MULTIBYTE:
+       case AML_FIELD_ATTRIB_RAW_BYTES:
+       case AML_FIELD_ATTRIB_RAW_PROCESS:
+
+               length = access_length;
+               break;
+
+       case AML_FIELD_ATTRIB_BLOCK:
+       case AML_FIELD_ATTRIB_BLOCK_CALL:
+       default:
+
+               length = ACPI_GSBUS_BUFFER_SIZE;
+               break;
+       }
+
+       return (length);
+}
+
 /*******************************************************************************
  *
  * FUNCTION:    acpi_ex_read_data_from_field
@@ -63,8 +124,9 @@ ACPI_MODULE_NAME("exfield")
  *              Buffer, depending on the size of the field.
  *
  ******************************************************************************/
+
 acpi_status
-acpi_ex_read_data_from_field(struct acpi_walk_state *walk_state,
+acpi_ex_read_data_from_field(struct acpi_walk_state * walk_state,
                             union acpi_operand_object *obj_desc,
                             union acpi_operand_object **ret_buffer_desc)
 {
@@ -73,6 +135,7 @@ acpi_ex_read_data_from_field(struct acpi_walk_state *walk_state,
        acpi_size length;
        void *buffer;
        u32 function;
+       u16 accessor_type;
 
        ACPI_FUNCTION_TRACE_PTR(ex_read_data_from_field, obj_desc);
 
@@ -116,9 +179,22 @@ acpi_ex_read_data_from_field(struct acpi_walk_state *walk_state,
                            ACPI_READ | (obj_desc->field.attribute << 16);
                } else if (obj_desc->field.region_obj->region.space_id ==
                           ACPI_ADR_SPACE_GSBUS) {
-                       length = ACPI_GSBUS_BUFFER_SIZE;
-                       function =
-                           ACPI_READ | (obj_desc->field.attribute << 16);
+                       accessor_type = obj_desc->field.attribute;
+                       length = acpi_ex_get_serial_access_length(accessor_type,
+                                                                 obj_desc->
+                                                                 field.
+                                                                 access_length);
+
+                       /*
+                        * Add additional 2 bytes for modeled generic_serial_bus data buffer:
+                        * typedef struct {
+                        *     BYTEStatus; // Byte 0 of the data buffer
+                        *     BYTELength; // Byte 1 of the data buffer
+                        *     BYTE[x-1]Data; // Bytes 2-x of the arbitrary length data buffer,
+                        * }
+                        */
+                       length += 2;
+                       function = ACPI_READ | (accessor_type << 16);
                } else {        /* IPMI */
 
                        length = ACPI_IPMI_BUFFER_SIZE;
@@ -231,6 +307,7 @@ acpi_ex_write_data_to_field(union acpi_operand_object *source_desc,
        void *buffer;
        union acpi_operand_object *buffer_desc;
        u32 function;
+       u16 accessor_type;
 
        ACPI_FUNCTION_TRACE_PTR(ex_write_data_to_field, obj_desc);
 
@@ -284,9 +361,22 @@ acpi_ex_write_data_to_field(union acpi_operand_object *source_desc,
                            ACPI_WRITE | (obj_desc->field.attribute << 16);
                } else if (obj_desc->field.region_obj->region.space_id ==
                           ACPI_ADR_SPACE_GSBUS) {
-                       length = ACPI_GSBUS_BUFFER_SIZE;
-                       function =
-                           ACPI_WRITE | (obj_desc->field.attribute << 16);
+                       accessor_type = obj_desc->field.attribute;
+                       length = acpi_ex_get_serial_access_length(accessor_type,
+                                                                 obj_desc->
+                                                                 field.
+                                                                 access_length);
+
+                       /*
+                        * Add additional 2 bytes for modeled generic_serial_bus data buffer:
+                        * typedef struct {
+                        *     BYTEStatus; // Byte 0 of the data buffer
+                        *     BYTELength; // Byte 1 of the data buffer
+                        *     BYTE[x-1]Data; // Bytes 2-x of the arbitrary length data buffer,
+                        * }
+                        */
+                       length += 2;
+                       function = ACPI_WRITE | (accessor_type << 16);
                } else {        /* IPMI */
 
                        length = ACPI_IPMI_BUFFER_SIZE;
index e7e5844c87d0c8de87379ae7ea6eef8ad91cb79f..cf925c4f36b70ee173ad2ad5a688416cfdb31734 100644 (file)
@@ -380,9 +380,8 @@ static void acpi_bus_notify(acpi_handle handle, u32 type, void *data)
                break;
 
        default:
-               acpi_handle_warn(handle, "Unsupported event type 0x%x\n", type);
-               ost_code = ACPI_OST_SC_UNRECOGNIZED_NOTIFY;
-               goto err;
+               acpi_handle_debug(handle, "Unknown event type 0x%x\n", type);
+               break;
        }
 
        adev = acpi_bus_get_acpi_device(handle);
index d7d32c28829b17834507bf8683f2c2a5c77d0a0d..ad11ba4a412dedc893ae4bdbb9230b3583bbf00b 100644 (file)
@@ -206,13 +206,13 @@ unlock:
        spin_unlock_irqrestore(&ec->lock, flags);
 }
 
-static int acpi_ec_sync_query(struct acpi_ec *ec);
+static int acpi_ec_sync_query(struct acpi_ec *ec, u8 *data);
 
 static int ec_check_sci_sync(struct acpi_ec *ec, u8 state)
 {
        if (state & ACPI_EC_FLAG_SCI) {
                if (!test_and_set_bit(EC_FLAGS_QUERY_PENDING, &ec->flags))
-                       return acpi_ec_sync_query(ec);
+                       return acpi_ec_sync_query(ec, NULL);
        }
        return 0;
 }
@@ -443,10 +443,8 @@ acpi_handle ec_get_handle(void)
 
 EXPORT_SYMBOL(ec_get_handle);
 
-static int acpi_ec_query_unlocked(struct acpi_ec *ec, u8 *data);
-
 /*
- * Clears stale _Q events that might have accumulated in the EC.
+ * Process _Q events that might have accumulated in the EC.
  * Run with locked ec mutex.
  */
 static void acpi_ec_clear(struct acpi_ec *ec)
@@ -455,7 +453,7 @@ static void acpi_ec_clear(struct acpi_ec *ec)
        u8 value = 0;
 
        for (i = 0; i < ACPI_EC_CLEAR_MAX; i++) {
-               status = acpi_ec_query_unlocked(ec, &value);
+               status = acpi_ec_sync_query(ec, &value);
                if (status || !value)
                        break;
        }
@@ -582,13 +580,18 @@ static void acpi_ec_run(void *cxt)
        kfree(handler);
 }
 
-static int acpi_ec_sync_query(struct acpi_ec *ec)
+static int acpi_ec_sync_query(struct acpi_ec *ec, u8 *data)
 {
        u8 value = 0;
        int status;
        struct acpi_ec_query_handler *handler, *copy;
-       if ((status = acpi_ec_query_unlocked(ec, &value)))
+
+       status = acpi_ec_query_unlocked(ec, &value);
+       if (data)
+               *data = value;
+       if (status)
                return status;
+
        list_for_each_entry(handler, &ec->list, node) {
                if (value == handler->query_bit) {
                        /* have custom handler for this bit */
@@ -612,7 +615,7 @@ static void acpi_ec_gpe_query(void *ec_cxt)
        if (!ec)
                return;
        mutex_lock(&ec->mutex);
-       acpi_ec_sync_query(ec);
+       acpi_ec_sync_query(ec, NULL);
        mutex_unlock(&ec->mutex);
 }
 
index 20e03a7eb8b431f692e534f6a3d895a2c2cd9476..c2706047337f17c0fad38b3161cabc93d95be0e5 100644 (file)
@@ -116,7 +116,7 @@ config AHCI_ST
 
 config AHCI_IMX
        tristate "Freescale i.MX AHCI SATA support"
-       depends on MFD_SYSCON
+       depends on MFD_SYSCON && (ARCH_MXC || COMPILE_TEST)
        help
          This option enables support for the Freescale i.MX SoC's
          onboard AHCI SATA.
@@ -134,8 +134,7 @@ config AHCI_SUNXI
 
 config AHCI_XGENE
        tristate "APM X-Gene 6.0Gbps AHCI SATA host controller support"
-       depends on ARM64 || COMPILE_TEST
-       select PHY_XGENE
+       depends on PHY_XGENE
        help
         This option enables support for APM X-Gene SoC SATA host controller.
 
index 5a0bf8ed649b8cf9266530ef309aec89a54f99ef..71e15b73513d22ed2bf5ac34afec9b5f42679fe7 100644 (file)
@@ -1164,9 +1164,9 @@ static inline void ahci_gtf_filter_workaround(struct ata_host *host)
 #endif
 
 static int ahci_init_interrupts(struct pci_dev *pdev, unsigned int n_ports,
-                        struct ahci_host_priv *hpriv)
+                               struct ahci_host_priv *hpriv)
 {
-       int nvec;
+       int rc, nvec;
 
        if (hpriv->flags & AHCI_HFLAG_NO_MSI)
                goto intx;
@@ -1183,12 +1183,19 @@ static int ahci_init_interrupts(struct pci_dev *pdev, unsigned int n_ports,
        if (nvec < n_ports)
                goto single_msi;
 
-       nvec = pci_enable_msi_range(pdev, nvec, nvec);
-       if (nvec == -ENOSPC)
+       rc = pci_enable_msi_exact(pdev, nvec);
+       if (rc == -ENOSPC)
                goto single_msi;
-       else if (nvec < 0)
+       else if (rc < 0)
                goto intx;
 
+       /* fallback to single MSI mode if the controller enforced MRSM mode */
+       if (readl(hpriv->mmio + HOST_CTL) & HOST_MRSM) {
+               pci_disable_msi(pdev);
+               printk(KERN_INFO "ahci: MRSM is on, fallback to single MSI\n");
+               goto single_msi;
+       }
+
        return nvec;
 
 single_msi:
@@ -1232,18 +1239,18 @@ int ahci_host_activate(struct ata_host *host, int irq, unsigned int n_msis)
                return rc;
 
        for (i = 0; i < host->n_ports; i++) {
-               const char* desc;
                struct ahci_port_priv *pp = host->ports[i]->private_data;
 
-               /* pp is NULL for dummy ports */
-               if (pp)
-                       desc = pp->irq_desc;
-               else
-                       desc = dev_driver_string(host->dev);
+               /* Do not receive interrupts sent by dummy ports */
+               if (!pp) {
+                       disable_irq(irq + i);
+                       continue;
+               }
 
-               rc = devm_request_threaded_irq(host->dev,
-                       irq + i, ahci_hw_interrupt, ahci_thread_fn, IRQF_SHARED,
-                       desc, host->ports[i]);
+               rc = devm_request_threaded_irq(host->dev, irq + i,
+                                              ahci_hw_interrupt,
+                                              ahci_thread_fn, IRQF_SHARED,
+                                              pp->irq_desc, host->ports[i]);
                if (rc)
                        goto out_free_irqs;
        }
index 51af275b3388541baad3f7bf021a098de9da9bf0..b5eb886da22635c3c76775bc0ef6374af3464b98 100644 (file)
@@ -94,6 +94,7 @@ enum {
        /* HOST_CTL bits */
        HOST_RESET              = (1 << 0),  /* reset controller; self-clear */
        HOST_IRQ_EN             = (1 << 1),  /* global IRQ enable */
+       HOST_MRSM               = (1 << 2),  /* MSI Revert to Single Message */
        HOST_AHCI_EN            = (1 << 31), /* AHCI enabled */
 
        /* HOST_CAP bits */
index c19734d96d7e6a029a1adf9667ce5a897708b58c..943cc8b83e59bb7f1b293abce887be717047ffff 100644 (file)
@@ -4224,8 +4224,10 @@ static const struct ata_blacklist_entry ata_device_blacklist [] = {
        { "PIONEER DVD-RW  DVR-216D",   NULL,   ATA_HORKAGE_NOSETXFER },
 
        /* devices that don't properly handle queued TRIM commands */
-       { "Micron_M500*",               NULL,   ATA_HORKAGE_NO_NCQ_TRIM, },
-       { "Crucial_CT???M500SSD*",      NULL,   ATA_HORKAGE_NO_NCQ_TRIM, },
+       { "Micron_M500*",               "MU0[1-4]*",    ATA_HORKAGE_NO_NCQ_TRIM, },
+       { "Crucial_CT???M500SSD*",      "MU0[1-4]*",    ATA_HORKAGE_NO_NCQ_TRIM, },
+       { "Micron_M550*",               NULL,           ATA_HORKAGE_NO_NCQ_TRIM, },
+       { "Crucial_CT???M550SSD*",      NULL,           ATA_HORKAGE_NO_NCQ_TRIM, },
 
        /*
         * Some WD SATA-I drives spin up and down erratically when the link
@@ -4792,21 +4794,26 @@ void swap_buf_le16(u16 *buf, unsigned int buf_words)
 static struct ata_queued_cmd *ata_qc_new(struct ata_port *ap)
 {
        struct ata_queued_cmd *qc = NULL;
-       unsigned int i;
+       unsigned int i, tag;
 
        /* no command while frozen */
        if (unlikely(ap->pflags & ATA_PFLAG_FROZEN))
                return NULL;
 
-       /* the last tag is reserved for internal command. */
-       for (i = 0; i < ATA_MAX_QUEUE - 1; i++)
-               if (!test_and_set_bit(i, &ap->qc_allocated)) {
-                       qc = __ata_qc_from_tag(ap, i);
+       for (i = 0; i < ATA_MAX_QUEUE; i++) {
+               tag = (i + ap->last_tag + 1) % ATA_MAX_QUEUE;
+
+               /* the last tag is reserved for internal command. */
+               if (tag == ATA_TAG_INTERNAL)
+                       continue;
+
+               if (!test_and_set_bit(tag, &ap->qc_allocated)) {
+                       qc = __ata_qc_from_tag(ap, tag);
+                       qc->tag = tag;
+                       ap->last_tag = tag;
                        break;
                }
-
-       if (qc)
-               qc->tag = i;
+       }
 
        return qc;
 }
index 6fac524c2f500381ac8d76b2a94bf4d88314f1e7..4edb1a81f63f68e3f37680b7334a3d7b81788f96 100644 (file)
@@ -898,9 +898,12 @@ static int arasan_cf_probe(struct platform_device *pdev)
 
        cf_card_detect(acdev, 0);
 
-       return ata_host_activate(host, acdev->irq, irq_handler, 0,
-                       &arasan_cf_sht);
+       ret = ata_host_activate(host, acdev->irq, irq_handler, 0,
+                               &arasan_cf_sht);
+       if (!ret)
+               return 0;
 
+       cf_exit(acdev);
 free_clk:
        clk_put(acdev->clk);
        return ret;
index e9c87274a781551d4496ac81b213855dd766ae41..8a66f23af4c40bd0ffd9e01776c02a16b26f0584 100644 (file)
@@ -407,12 +407,13 @@ static int pata_at91_probe(struct platform_device *pdev)
 
        host->private_data = info;
 
-       return ata_host_activate(host, gpio_is_valid(irq) ? gpio_to_irq(irq) : 0,
-                       gpio_is_valid(irq) ? ata_sff_interrupt : NULL,
-                       irq_flags, &pata_at91_sht);
+       ret = ata_host_activate(host, gpio_is_valid(irq) ? gpio_to_irq(irq) : 0,
+                               gpio_is_valid(irq) ? ata_sff_interrupt : NULL,
+                               irq_flags, &pata_at91_sht);
+       if (ret)
+               goto err_put;
 
-       if (!ret)
-               return 0;
+       return 0;
 
 err_put:
        clk_put(info->mck);
index a79566d056666f0d0449785856b679492d71456b..0610e78c8a2a8334cfa8b6d585285606af3613bc 100644 (file)
@@ -594,9 +594,13 @@ static int __init pata_s3c_probe(struct platform_device *pdev)
 
        platform_set_drvdata(pdev, host);
 
-       return ata_host_activate(host, info->irq,
-                       info->irq ? pata_s3c_irq : NULL,
-                       0, &pata_s3c_sht);
+       ret = ata_host_activate(host, info->irq,
+                               info->irq ? pata_s3c_irq : NULL,
+                               0, &pata_s3c_sht);
+       if (ret)
+               goto stop_clk;
+
+       return 0;
 
 stop_clk:
        clk_disable(info->clk);
index 1bdf104e90bb7f1924acf51f78caa39817d78efb..b621f56a36be5850b1abc4d1619b665fbce7ccf3 100644 (file)
@@ -2551,12 +2551,12 @@ done:
                timeout = 5 * 1000;
                while (atomic_read(&vc->scq->used) > 0) {
                        timeout = msleep_interruptible(timeout);
-                       if (!timeout)
+                       if (!timeout) {
+                               pr_warn("%s: SCQ drain timeout: %u used\n",
+                                       card->name, atomic_read(&vc->scq->used));
                                break;
+                       }
                }
-               if (!timeout)
-                       printk("%s: SCQ drain timeout: %u used\n",
-                              card->name, atomic_read(&vc->scq->used));
 
                writel(TCMDQ_HALT | vc->index, SAR_REG_TCMDQ);
                clear_scd(card, vc->scq, vc->class);
index 8986b9f22781fa667cf41a37b5889e8ac3be0a36..62ec61e8f84ac90d7c4e433ccc4ead67ff96fbd3 100644 (file)
@@ -52,6 +52,7 @@ static DEFINE_MUTEX(deferred_probe_mutex);
 static LIST_HEAD(deferred_probe_pending_list);
 static LIST_HEAD(deferred_probe_active_list);
 static struct workqueue_struct *deferred_wq;
+static atomic_t deferred_trigger_count = ATOMIC_INIT(0);
 
 /**
  * deferred_probe_work_func() - Retry probing devices in the active list.
@@ -135,6 +136,17 @@ static bool driver_deferred_probe_enable = false;
  * This functions moves all devices from the pending list to the active
  * list and schedules the deferred probe workqueue to process them.  It
  * should be called anytime a driver is successfully bound to a device.
+ *
+ * Note, there is a race condition in multi-threaded probe. In the case where
+ * more than one device is probing at the same time, it is possible for one
+ * probe to complete successfully while another is about to defer. If the second
+ * depends on the first, then it will get put on the pending list after the
+ * trigger event has already occured and will be stuck there.
+ *
+ * The atomic 'deferred_trigger_count' is used to determine if a successful
+ * trigger has occurred in the midst of probing a driver. If the trigger count
+ * changes in the midst of a probe, then deferred processing should be triggered
+ * again.
  */
 static void driver_deferred_probe_trigger(void)
 {
@@ -147,6 +159,7 @@ static void driver_deferred_probe_trigger(void)
         * into the active list so they can be retried by the workqueue
         */
        mutex_lock(&deferred_probe_mutex);
+       atomic_inc(&deferred_trigger_count);
        list_splice_tail_init(&deferred_probe_pending_list,
                              &deferred_probe_active_list);
        mutex_unlock(&deferred_probe_mutex);
@@ -265,6 +278,7 @@ static DECLARE_WAIT_QUEUE_HEAD(probe_waitqueue);
 static int really_probe(struct device *dev, struct device_driver *drv)
 {
        int ret = 0;
+       int local_trigger_count = atomic_read(&deferred_trigger_count);
 
        atomic_inc(&probe_count);
        pr_debug("bus: '%s': %s: probing driver %s with device %s\n",
@@ -310,6 +324,9 @@ probe_failed:
                /* Driver requested deferred probing */
                dev_info(dev, "Driver %s requests probe deferral\n", drv->name);
                driver_deferred_probe_add(dev);
+               /* Did a trigger occur while probing? Need to re-trigger if yes */
+               if (local_trigger_count != atomic_read(&deferred_trigger_count))
+                       driver_deferred_probe_trigger();
        } else if (ret != -ENODEV && ret != -ENXIO) {
                /* driver matched but the probe failed */
                printk(KERN_WARNING
index e714709704e4578ccc3703ca30108e596461ad28..5b47210889e038d72f7a172062b3c4ddd2daa07d 100644 (file)
@@ -13,6 +13,7 @@
 #include <linux/string.h>
 #include <linux/platform_device.h>
 #include <linux/of_device.h>
+#include <linux/of_irq.h>
 #include <linux/module.h>
 #include <linux/init.h>
 #include <linux/dma-mapping.h>
@@ -87,7 +88,11 @@ int platform_get_irq(struct platform_device *dev, unsigned int num)
                return -ENXIO;
        return dev->archdata.irqs[num];
 #else
-       struct resource *r = platform_get_resource(dev, IORESOURCE_IRQ, num);
+       struct resource *r;
+       if (IS_ENABLED(CONFIG_OF_IRQ) && dev->dev.of_node)
+               return of_irq_get(dev->dev.of_node, num);
+
+       r = platform_get_resource(dev, IORESOURCE_IRQ, num);
 
        return r ? r->start : -ENXIO;
 #endif
index 8f5565bf34cda31504e526ccc3d79d4e7fe20fd2..fa9bb742df6e0becfa8bca52576f17b5bdafe2bf 100644 (file)
@@ -3067,7 +3067,10 @@ static int raw_cmd_copyout(int cmd, void __user *param,
        int ret;
 
        while (ptr) {
-               ret = copy_to_user(param, ptr, sizeof(*ptr));
+               struct floppy_raw_cmd cmd = *ptr;
+               cmd.next = NULL;
+               cmd.kernel_data = NULL;
+               ret = copy_to_user(param, &cmd, sizeof(cmd));
                if (ret)
                        return -EFAULT;
                param += sizeof(struct floppy_raw_cmd);
@@ -3121,10 +3124,11 @@ loop:
                return -ENOMEM;
        *rcmd = ptr;
        ret = copy_from_user(ptr, param, sizeof(*ptr));
-       if (ret)
-               return -EFAULT;
        ptr->next = NULL;
        ptr->buffer_length = 0;
+       ptr->kernel_data = NULL;
+       if (ret)
+               return -EFAULT;
        param += sizeof(struct floppy_raw_cmd);
        if (ptr->cmd_count > 33)
                        /* the command may now also take up the space
@@ -3140,7 +3144,6 @@ loop:
        for (i = 0; i < 16; i++)
                ptr->reply[i] = 0;
        ptr->resultcode = 0;
-       ptr->kernel_data = NULL;
 
        if (ptr->flags & (FD_RAW_READ | FD_RAW_WRITE)) {
                if (ptr->length <= 0)
index be571fef185da6a597fcdac3d15093ed8e47fb5b..a83b57e57b6370572d53325638355a0d94ce24bf 100644 (file)
@@ -82,6 +82,7 @@ static const struct usb_device_id ath3k_table[] = {
        { USB_DEVICE(0x04CA, 0x3004) },
        { USB_DEVICE(0x04CA, 0x3005) },
        { USB_DEVICE(0x04CA, 0x3006) },
+       { USB_DEVICE(0x04CA, 0x3007) },
        { USB_DEVICE(0x04CA, 0x3008) },
        { USB_DEVICE(0x04CA, 0x300b) },
        { USB_DEVICE(0x0930, 0x0219) },
@@ -131,6 +132,7 @@ static const struct usb_device_id ath3k_blist_tbl[] = {
        { USB_DEVICE(0x04ca, 0x3004), .driver_info = BTUSB_ATH3012 },
        { USB_DEVICE(0x04ca, 0x3005), .driver_info = BTUSB_ATH3012 },
        { USB_DEVICE(0x04ca, 0x3006), .driver_info = BTUSB_ATH3012 },
+       { USB_DEVICE(0x04ca, 0x3007), .driver_info = BTUSB_ATH3012 },
        { USB_DEVICE(0x04ca, 0x3008), .driver_info = BTUSB_ATH3012 },
        { USB_DEVICE(0x04ca, 0x300b), .driver_info = BTUSB_ATH3012 },
        { USB_DEVICE(0x0930, 0x0219), .driver_info = BTUSB_ATH3012 },
index 7399303d7d9978447ff722d823c1dcece408029a..dc79f88f8717f478c8d8ab6d0a7d45849f772ac1 100644 (file)
@@ -59,6 +59,8 @@ struct btmrvl_device {
 };
 
 struct btmrvl_adapter {
+       void *hw_regs_buf;
+       u8 *hw_regs;
        u32 int_count;
        struct sk_buff_head tx_queue;
        u8 psmode;
@@ -140,7 +142,7 @@ void btmrvl_interrupt(struct btmrvl_private *priv);
 bool btmrvl_check_evtpkt(struct btmrvl_private *priv, struct sk_buff *skb);
 int btmrvl_process_event(struct btmrvl_private *priv, struct sk_buff *skb);
 
-int btmrvl_send_module_cfg_cmd(struct btmrvl_private *priv, int subcmd);
+int btmrvl_send_module_cfg_cmd(struct btmrvl_private *priv, u8 subcmd);
 int btmrvl_send_hscfg_cmd(struct btmrvl_private *priv);
 int btmrvl_enable_ps(struct btmrvl_private *priv);
 int btmrvl_prepare_command(struct btmrvl_private *priv);
index 2c4997ce248484703a1b859c5e518396fcdbfa64..e9dbddb0b8f1efb1f15ede65f50d80390ca370c3 100644 (file)
@@ -24,6 +24,7 @@
 #include <net/bluetooth/hci_core.h>
 
 #include "btmrvl_drv.h"
+#include "btmrvl_sdio.h"
 
 #define VERSION "1.0"
 
@@ -201,7 +202,7 @@ static int btmrvl_send_sync_cmd(struct btmrvl_private *priv, u16 opcode,
        return 0;
 }
 
-int btmrvl_send_module_cfg_cmd(struct btmrvl_private *priv, int subcmd)
+int btmrvl_send_module_cfg_cmd(struct btmrvl_private *priv, u8 subcmd)
 {
        int ret;
 
@@ -337,10 +338,25 @@ static int btmrvl_tx_pkt(struct btmrvl_private *priv, struct sk_buff *skb)
 
 static void btmrvl_init_adapter(struct btmrvl_private *priv)
 {
+       int buf_size;
+
        skb_queue_head_init(&priv->adapter->tx_queue);
 
        priv->adapter->ps_state = PS_AWAKE;
 
+       buf_size = ALIGN_SZ(SDIO_BLOCK_SIZE, BTSDIO_DMA_ALIGN);
+       priv->adapter->hw_regs_buf = kzalloc(buf_size, GFP_KERNEL);
+       if (!priv->adapter->hw_regs_buf) {
+               priv->adapter->hw_regs = NULL;
+               BT_ERR("Unable to allocate buffer for hw_regs.");
+       } else {
+               priv->adapter->hw_regs =
+                       (u8 *)ALIGN_ADDR(priv->adapter->hw_regs_buf,
+                                        BTSDIO_DMA_ALIGN);
+               BT_DBG("hw_regs_buf=%p hw_regs=%p",
+                      priv->adapter->hw_regs_buf, priv->adapter->hw_regs);
+       }
+
        init_waitqueue_head(&priv->adapter->cmd_wait_q);
 }
 
@@ -348,6 +364,7 @@ static void btmrvl_free_adapter(struct btmrvl_private *priv)
 {
        skb_queue_purge(&priv->adapter->tx_queue);
 
+       kfree(priv->adapter->hw_regs_buf);
        kfree(priv->adapter);
 
        priv->adapter = NULL;
index 1b52c9f5230d324d0476a2d7dd3f308e7fd723d8..9dedca516ff50567a278fb9a511dbcdfd7a1980c 100644 (file)
@@ -64,6 +64,7 @@ static const struct btmrvl_sdio_card_reg btmrvl_reg_8688 = {
        .io_port_0 = 0x00,
        .io_port_1 = 0x01,
        .io_port_2 = 0x02,
+       .int_read_to_clear = false,
 };
 static const struct btmrvl_sdio_card_reg btmrvl_reg_87xx = {
        .cfg = 0x00,
@@ -80,6 +81,7 @@ static const struct btmrvl_sdio_card_reg btmrvl_reg_87xx = {
        .io_port_0 = 0x78,
        .io_port_1 = 0x79,
        .io_port_2 = 0x7a,
+       .int_read_to_clear = false,
 };
 
 static const struct btmrvl_sdio_card_reg btmrvl_reg_88xx = {
@@ -97,6 +99,9 @@ static const struct btmrvl_sdio_card_reg btmrvl_reg_88xx = {
        .io_port_0 = 0xd8,
        .io_port_1 = 0xd9,
        .io_port_2 = 0xda,
+       .int_read_to_clear = true,
+       .host_int_rsr = 0x01,
+       .card_misc_cfg = 0xcc,
 };
 
 static const struct btmrvl_sdio_device btmrvl_sdio_sd8688 = {
@@ -667,46 +672,78 @@ static int btmrvl_sdio_process_int_status(struct btmrvl_private *priv)
        return 0;
 }
 
-static void btmrvl_sdio_interrupt(struct sdio_func *func)
+static int btmrvl_sdio_read_to_clear(struct btmrvl_sdio_card *card, u8 *ireg)
 {
-       struct btmrvl_private *priv;
-       struct btmrvl_sdio_card *card;
-       ulong flags;
-       u8 ireg = 0;
+       struct btmrvl_adapter *adapter = card->priv->adapter;
        int ret;
 
-       card = sdio_get_drvdata(func);
-       if (!card || !card->priv) {
-               BT_ERR("sbi_interrupt(%p) card or priv is "
-                               "NULL, card=%p\n", func, card);
-               return;
+       ret = sdio_readsb(card->func, adapter->hw_regs, 0, SDIO_BLOCK_SIZE);
+       if (ret) {
+               BT_ERR("sdio_readsb: read int hw_regs failed: %d", ret);
+               return ret;
        }
 
-       priv = card->priv;
+       *ireg = adapter->hw_regs[card->reg->host_intstatus];
+       BT_DBG("hw_regs[%#x]=%#x", card->reg->host_intstatus, *ireg);
+
+       return 0;
+}
 
-       ireg = sdio_readb(card->func, card->reg->host_intstatus, &ret);
+static int btmrvl_sdio_write_to_clear(struct btmrvl_sdio_card *card, u8 *ireg)
+{
+       int ret;
+
+       *ireg = sdio_readb(card->func, card->reg->host_intstatus, &ret);
        if (ret) {
-               BT_ERR("sdio_readb: read int status register failed");
-               return;
+               BT_ERR("sdio_readb: read int status failed: %d", ret);
+               return ret;
        }
 
-       if (ireg != 0) {
+       if (*ireg) {
                /*
                 * DN_LD_HOST_INT_STATUS and/or UP_LD_HOST_INT_STATUS
                 * Clear the interrupt status register and re-enable the
                 * interrupt.
                 */
-               BT_DBG("ireg = 0x%x", ireg);
+               BT_DBG("int_status = 0x%x", *ireg);
 
-               sdio_writeb(card->func, ~(ireg) & (DN_LD_HOST_INT_STATUS |
-                                       UP_LD_HOST_INT_STATUS),
-                               card->reg->host_intstatus, &ret);
+               sdio_writeb(card->func, ~(*ireg) & (DN_LD_HOST_INT_STATUS |
+                                                   UP_LD_HOST_INT_STATUS),
+                           card->reg->host_intstatus, &ret);
                if (ret) {
-                       BT_ERR("sdio_writeb: clear int status register failed");
-                       return;
+                       BT_ERR("sdio_writeb: clear int status failed: %d", ret);
+                       return ret;
                }
        }
 
+       return 0;
+}
+
+static void btmrvl_sdio_interrupt(struct sdio_func *func)
+{
+       struct btmrvl_private *priv;
+       struct btmrvl_sdio_card *card;
+       ulong flags;
+       u8 ireg = 0;
+       int ret;
+
+       card = sdio_get_drvdata(func);
+       if (!card || !card->priv) {
+               BT_ERR("sbi_interrupt(%p) card or priv is "
+                               "NULL, card=%p\n", func, card);
+               return;
+       }
+
+       priv = card->priv;
+
+       if (card->reg->int_read_to_clear)
+               ret = btmrvl_sdio_read_to_clear(card, &ireg);
+       else
+               ret = btmrvl_sdio_write_to_clear(card, &ireg);
+
+       if (ret)
+               return;
+
        spin_lock_irqsave(&priv->driver_lock, flags);
        sdio_ireg |= ireg;
        spin_unlock_irqrestore(&priv->driver_lock, flags);
@@ -777,6 +814,30 @@ static int btmrvl_sdio_register_dev(struct btmrvl_sdio_card *card)
 
        BT_DBG("SDIO FUNC%d IO port: 0x%x", func->num, card->ioport);
 
+       if (card->reg->int_read_to_clear) {
+               reg = sdio_readb(func, card->reg->host_int_rsr, &ret);
+               if (ret < 0) {
+                       ret = -EIO;
+                       goto release_irq;
+               }
+               sdio_writeb(func, reg | 0x3f, card->reg->host_int_rsr, &ret);
+               if (ret < 0) {
+                       ret = -EIO;
+                       goto release_irq;
+               }
+
+               reg = sdio_readb(func, card->reg->card_misc_cfg, &ret);
+               if (ret < 0) {
+                       ret = -EIO;
+                       goto release_irq;
+               }
+               sdio_writeb(func, reg | 0x10, card->reg->card_misc_cfg, &ret);
+               if (ret < 0) {
+                       ret = -EIO;
+                       goto release_irq;
+               }
+       }
+
        sdio_set_drvdata(func, card);
 
        sdio_release_host(func);
index 43d35a609ca9a94795afb731d230fa88ca109bef..d4dd3b0fa53d16d68da0101494e43ca4664716c2 100644 (file)
@@ -78,6 +78,9 @@ struct btmrvl_sdio_card_reg {
        u8 io_port_0;
        u8 io_port_1;
        u8 io_port_2;
+       bool int_read_to_clear;
+       u8 host_int_rsr;
+       u8 card_misc_cfg;
 };
 
 struct btmrvl_sdio_card {
index f338b0c5a8de507a153b6761886b943c702176a4..a7dfbf9a3afb6be53e372f78d9ee8202bdb17d08 100644 (file)
@@ -152,6 +152,7 @@ static const struct usb_device_id blacklist_table[] = {
        { USB_DEVICE(0x04ca, 0x3004), .driver_info = BTUSB_ATH3012 },
        { USB_DEVICE(0x04ca, 0x3005), .driver_info = BTUSB_ATH3012 },
        { USB_DEVICE(0x04ca, 0x3006), .driver_info = BTUSB_ATH3012 },
+       { USB_DEVICE(0x04ca, 0x3007), .driver_info = BTUSB_ATH3012 },
        { USB_DEVICE(0x04ca, 0x3008), .driver_info = BTUSB_ATH3012 },
        { USB_DEVICE(0x04ca, 0x300b), .driver_info = BTUSB_ATH3012 },
        { USB_DEVICE(0x0930, 0x0219), .driver_info = BTUSB_ATH3012 },
@@ -1485,10 +1486,8 @@ static int btusb_probe(struct usb_interface *intf,
        if (id->driver_info & BTUSB_BCM92035)
                hdev->setup = btusb_setup_bcm92035;
 
-       if (id->driver_info & BTUSB_INTEL) {
-               usb_enable_autosuspend(data->udev);
+       if (id->driver_info & BTUSB_INTEL)
                hdev->setup = btusb_setup_intel;
-       }
 
        /* Interface numbers are hardcoded in the specification */
        data->isoc = usb_ifnum_to_if(data->udev, 1);
index 7048a583fe51a695a044ad541a0f894e3bf7c510..66db9a803373efb92c8966c9f69fdb6b8ec7aa59 100644 (file)
@@ -55,13 +55,6 @@ struct h4_struct {
        struct sk_buff_head txq;
 };
 
-/* H4 receiver States */
-#define H4_W4_PACKET_TYPE      0
-#define H4_W4_EVENT_HDR                1
-#define H4_W4_ACL_HDR          2
-#define H4_W4_SCO_HDR          3
-#define H4_W4_DATA             4
-
 /* Initialize protocol */
 static int h4_open(struct hci_uart *hu)
 {
index 166e02f16c8a25f28f4441584dc6e8babc448f3b..cc37c342c4cb9a18dd355a66e06a993ff43de4e5 100644 (file)
@@ -764,7 +764,6 @@ static struct tegra_clk tegra124_clks[tegra_clk_max] __initdata = {
        [tegra_clk_sdmmc2_8] = { .dt_id = TEGRA124_CLK_SDMMC2, .present = true },
        [tegra_clk_i2s1] = { .dt_id = TEGRA124_CLK_I2S1, .present = true },
        [tegra_clk_i2c1] = { .dt_id = TEGRA124_CLK_I2C1, .present = true },
-       [tegra_clk_ndflash] = { .dt_id = TEGRA124_CLK_NDFLASH, .present = true },
        [tegra_clk_sdmmc1_8] = { .dt_id = TEGRA124_CLK_SDMMC1, .present = true },
        [tegra_clk_sdmmc4_8] = { .dt_id = TEGRA124_CLK_SDMMC4, .present = true },
        [tegra_clk_pwm] = { .dt_id = TEGRA124_CLK_PWM, .present = true },
@@ -809,7 +808,6 @@ static struct tegra_clk tegra124_clks[tegra_clk_max] __initdata = {
        [tegra_clk_trace] = { .dt_id = TEGRA124_CLK_TRACE, .present = true },
        [tegra_clk_soc_therm] = { .dt_id = TEGRA124_CLK_SOC_THERM, .present = true },
        [tegra_clk_dtv] = { .dt_id = TEGRA124_CLK_DTV, .present = true },
-       [tegra_clk_ndspeed] = { .dt_id = TEGRA124_CLK_NDSPEED, .present = true },
        [tegra_clk_i2cslow] = { .dt_id = TEGRA124_CLK_I2CSLOW, .present = true },
        [tegra_clk_dsib] = { .dt_id = TEGRA124_CLK_DSIB, .present = true },
        [tegra_clk_tsec] = { .dt_id = TEGRA124_CLK_TSEC, .present = true },
@@ -952,7 +950,6 @@ static struct tegra_clk tegra124_clks[tegra_clk_max] __initdata = {
        [tegra_clk_clk_out_3_mux] = { .dt_id = TEGRA124_CLK_CLK_OUT_3_MUX, .present = true },
        [tegra_clk_dsia_mux] = { .dt_id = TEGRA124_CLK_DSIA_MUX, .present = true },
        [tegra_clk_dsib_mux] = { .dt_id = TEGRA124_CLK_DSIB_MUX, .present = true },
-       [tegra_clk_uarte] = { .dt_id = TEGRA124_CLK_UARTE, .present = true },
 };
 
 static struct tegra_devclk devclks[] __initdata = {
index 67c8de572c50188af9fb50a1629bcfd0a73193f0..b4877e0ee91051ddbc2cebc4d4d569c6ffc3c2cb 100644 (file)
@@ -110,9 +110,25 @@ static struct ti_dt_clk am43xx_clks[] = {
 
 int __init am43xx_dt_clk_init(void)
 {
+       struct clk *clk1, *clk2;
+
        ti_dt_clocks_register(am43xx_clks);
 
        omap2_clk_disable_autoidle_all();
 
+       /*
+        * cpsw_cpts_rft_clk  has got the choice of 3 clocksources
+        * dpll_core_m4_ck, dpll_core_m5_ck and dpll_disp_m2_ck.
+        * By default dpll_core_m4_ck is selected, witn this as clock
+        * source the CPTS doesnot work properly. It gives clockcheck errors
+        * while running PTP.
+        * clockcheck: clock jumped backward or running slower than expected!
+        * By selecting dpll_core_m5_ck as the clocksource fixes this issue.
+        * In AM335x dpll_core_m5_ck is the default clocksource.
+        */
+       clk1 = clk_get_sys(NULL, "cpsw_cpts_rft_clk");
+       clk2 = clk_get_sys(NULL, "dpll_core_m5_ck");
+       clk_set_parent(clk1, clk2);
+
        return 0;
 }
index 2dc8b41a339dba3dc359e7ee34b10de98e8eb3fe..422391242b39ceac4a322ee532288ee46b6eba32 100644 (file)
@@ -100,9 +100,11 @@ void __init vexpress_osc_of_setup(struct device_node *node)
        struct clk *clk;
        u32 range[2];
 
+       vexpress_sysreg_of_early_init();
+
        osc = kzalloc(sizeof(*osc), GFP_KERNEL);
        if (!osc)
-               goto error;
+               return;
 
        osc->func = vexpress_config_func_get_by_node(node);
        if (!osc->func) {
index 57e823c44d2ad326eeaaa788fbb67d80ba620c7d..5163ec13429d1e37082b8d32c84684284217322f 100644 (file)
@@ -66,6 +66,7 @@ static int arch_timer_ppi[MAX_TIMER_PPI];
 static struct clock_event_device __percpu *arch_timer_evt;
 
 static bool arch_timer_use_virtual = true;
+static bool arch_timer_c3stop;
 static bool arch_timer_mem_use_virtual;
 
 /*
@@ -263,7 +264,8 @@ static void __arch_timer_setup(unsigned type,
        clk->features = CLOCK_EVT_FEAT_ONESHOT;
 
        if (type == ARCH_CP15_TIMER) {
-               clk->features |= CLOCK_EVT_FEAT_C3STOP;
+               if (arch_timer_c3stop)
+                       clk->features |= CLOCK_EVT_FEAT_C3STOP;
                clk->name = "arch_sys_timer";
                clk->rating = 450;
                clk->cpumask = cpumask_of(smp_processor_id());
@@ -665,6 +667,8 @@ static void __init arch_timer_init(struct device_node *np)
                }
        }
 
+       arch_timer_c3stop = !of_property_read_bool(np, "always-on");
+
        arch_timer_register();
        arch_timer_common_init();
 }
index a6ee6d7cd63f19a4cdad01a1a82956f990a76194..acf5a329d5387653b4359d27feb772afe389a92b 100644 (file)
@@ -416,8 +416,6 @@ static int exynos4_local_timer_setup(struct clock_event_device *evt)
        evt->set_mode = exynos4_tick_set_mode;
        evt->features = CLOCK_EVT_FEAT_PERIODIC | CLOCK_EVT_FEAT_ONESHOT;
        evt->rating = 450;
-       clockevents_config_and_register(evt, clk_rate / (TICK_BASE_CNT + 1),
-                                       0xf, 0x7fffffff);
 
        exynos4_mct_write(TICK_BASE_CNT, mevt->base + MCT_L_TCNTB_OFFSET);
 
@@ -430,9 +428,12 @@ static int exynos4_local_timer_setup(struct clock_event_device *evt)
                                evt->irq);
                        return -EIO;
                }
+               irq_force_affinity(mct_irqs[MCT_L0_IRQ + cpu], cpumask_of(cpu));
        } else {
                enable_percpu_irq(mct_irqs[MCT_L0_IRQ], 0);
        }
+       clockevents_config_and_register(evt, clk_rate / (TICK_BASE_CNT + 1),
+                                       0xf, 0x7fffffff);
 
        return 0;
 }
@@ -450,7 +451,6 @@ static int exynos4_mct_cpu_notify(struct notifier_block *self,
                                           unsigned long action, void *hcpu)
 {
        struct mct_clock_event_device *mevt;
-       unsigned int cpu;
 
        /*
         * Grab cpu pointer in each case to avoid spurious
@@ -461,12 +461,6 @@ static int exynos4_mct_cpu_notify(struct notifier_block *self,
                mevt = this_cpu_ptr(&percpu_mct_tick);
                exynos4_local_timer_setup(&mevt->evt);
                break;
-       case CPU_ONLINE:
-               cpu = (unsigned long)hcpu;
-               if (mct_int_type == MCT_INT_SPI)
-                       irq_set_affinity(mct_irqs[MCT_L0_IRQ + cpu],
-                                               cpumask_of(cpu));
-               break;
        case CPU_DYING:
                mevt = this_cpu_ptr(&percpu_mct_tick);
                exynos4_local_timer_stop(&mevt->evt);
index ca81809d159d5ebf49ddcb6f9df0bc38208fefce..7ce442148c3f5dfb32449498bc9352066050688e 100644 (file)
@@ -212,4 +212,9 @@ error_free:
        return ret;
 }
 
-CLOCKSOURCE_OF_DECLARE(zevio_timer, "lsi,zevio-timer", zevio_timer_add);
+static void __init zevio_timer_init(struct device_node *node)
+{
+       BUG_ON(zevio_timer_add(node));
+}
+
+CLOCKSOURCE_OF_DECLARE(zevio_timer, "lsi,zevio-timer", zevio_timer_init);
index 148d707a1d439375ef36bf2ac8e6655fe2a61042..ccdd4c7e748b3b1e63b75e08398ac7972fde66b8 100644 (file)
@@ -369,7 +369,7 @@ static void cn_proc_mcast_ctl(struct cn_msg *msg,
                return;
 
        /* Can only change if privileged. */
-       if (!capable(CAP_NET_ADMIN)) {
+       if (!__netlink_ns_capable(nsp, &init_user_ns, CAP_NET_ADMIN)) {
                err = EPERM;
                goto out;
        }
index 0e9cce82844bf519f62b7bed24bf0783d89400e9..580503513f0f10687d46e666e19f253d7fbbfb51 100644 (file)
@@ -92,11 +92,7 @@ config ARM_EXYNOS_CPU_FREQ_BOOST_SW
 
 config ARM_HIGHBANK_CPUFREQ
        tristate "Calxeda Highbank-based"
-       depends on ARCH_HIGHBANK
-       select GENERIC_CPUFREQ_CPU0
-       select PM_OPP
-       select REGULATOR
-
+       depends on ARCH_HIGHBANK && GENERIC_CPUFREQ_CPU0 && REGULATOR
        default m
        help
          This adds the CPUFreq driver for Calxeda Highbank SoC
index d00e5d1abd258b469bf48862a5f14b08e04f97a7..5c4369b5d834d93f05095cf6052848ec2d2ddec4 100644 (file)
@@ -242,7 +242,7 @@ static void do_powersaver(int cx_address, unsigned int mults_index,
  * Sets a new clock ratio.
  */
 
-static void longhaul_setstate(struct cpufreq_policy *policy,
+static int longhaul_setstate(struct cpufreq_policy *policy,
                unsigned int table_index)
 {
        unsigned int mults_index;
@@ -258,10 +258,12 @@ static void longhaul_setstate(struct cpufreq_policy *policy,
        /* Safety precautions */
        mult = mults[mults_index & 0x1f];
        if (mult == -1)
-               return;
+               return -EINVAL;
+
        speed = calc_speed(mult);
        if ((speed > highest_speed) || (speed < lowest_speed))
-               return;
+               return -EINVAL;
+
        /* Voltage transition before frequency transition? */
        if (can_scale_voltage && longhaul_index < table_index)
                dir = 1;
@@ -269,8 +271,6 @@ static void longhaul_setstate(struct cpufreq_policy *policy,
        freqs.old = calc_speed(longhaul_get_cpu_mult());
        freqs.new = speed;
 
-       cpufreq_freq_transition_begin(policy, &freqs);
-
        pr_debug("Setting to FSB:%dMHz Mult:%d.%dx (%s)\n",
                        fsb, mult/10, mult%10, print_speed(speed/1000));
 retry_loop:
@@ -385,12 +385,14 @@ retry_loop:
                        goto retry_loop;
                }
        }
-       /* Report true CPU frequency */
-       cpufreq_freq_transition_end(policy, &freqs, 0);
 
-       if (!bm_timeout)
+       if (!bm_timeout) {
                printk(KERN_INFO PFX "Warning: Timeout while waiting for "
                                "idle PCI bus.\n");
+               return -EBUSY;
+       }
+
+       return 0;
 }
 
 /*
@@ -631,9 +633,10 @@ static int longhaul_target(struct cpufreq_policy *policy,
        unsigned int i;
        unsigned int dir = 0;
        u8 vid, current_vid;
+       int retval = 0;
 
        if (!can_scale_voltage)
-               longhaul_setstate(policy, table_index);
+               retval = longhaul_setstate(policy, table_index);
        else {
                /* On test system voltage transitions exceeding single
                 * step up or down were turning motherboard off. Both
@@ -648,7 +651,7 @@ static int longhaul_target(struct cpufreq_policy *policy,
                while (i != table_index) {
                        vid = (longhaul_table[i].driver_data >> 8) & 0x1f;
                        if (vid != current_vid) {
-                               longhaul_setstate(policy, i);
+                               retval = longhaul_setstate(policy, i);
                                current_vid = vid;
                                msleep(200);
                        }
@@ -657,10 +660,11 @@ static int longhaul_target(struct cpufreq_policy *policy,
                        else
                                i--;
                }
-               longhaul_setstate(policy, table_index);
+               retval = longhaul_setstate(policy, table_index);
        }
+
        longhaul_index = table_index;
-       return 0;
+       return retval;
 }
 
 
@@ -968,7 +972,15 @@ static void __exit longhaul_exit(void)
 
        for (i = 0; i < numscales; i++) {
                if (mults[i] == maxmult) {
+                       struct cpufreq_freqs freqs;
+
+                       freqs.old = policy->cur;
+                       freqs.new = longhaul_table[i].frequency;
+                       freqs.flags = 0;
+
+                       cpufreq_freq_transition_begin(policy, &freqs);
                        longhaul_setstate(policy, i);
+                       cpufreq_freq_transition_end(policy, &freqs, 0);
                        break;
                }
        }
index 49f120e1bc7be0ecb879f184424d1dc56ee63981..78904e6ca4a020d53a60f4139711441a069ad447 100644 (file)
@@ -138,22 +138,14 @@ static void powernow_k6_set_cpu_multiplier(unsigned int best_i)
 static int powernow_k6_target(struct cpufreq_policy *policy,
                unsigned int best_i)
 {
-       struct cpufreq_freqs freqs;
 
        if (clock_ratio[best_i].driver_data > max_multiplier) {
                printk(KERN_ERR PFX "invalid target frequency\n");
                return -EINVAL;
        }
 
-       freqs.old = busfreq * powernow_k6_get_cpu_multiplier();
-       freqs.new = busfreq * clock_ratio[best_i].driver_data;
-
-       cpufreq_freq_transition_begin(policy, &freqs);
-
        powernow_k6_set_cpu_multiplier(best_i);
 
-       cpufreq_freq_transition_end(policy, &freqs, 0);
-
        return 0;
 }
 
@@ -227,9 +219,20 @@ have_busfreq:
 static int powernow_k6_cpu_exit(struct cpufreq_policy *policy)
 {
        unsigned int i;
-       for (i = 0; i < 8; i++) {
-               if (i == max_multiplier)
+
+       for (i = 0; (clock_ratio[i].frequency != CPUFREQ_TABLE_END); i++) {
+               if (clock_ratio[i].driver_data == max_multiplier) {
+                       struct cpufreq_freqs freqs;
+
+                       freqs.old = policy->cur;
+                       freqs.new = clock_ratio[i].frequency;
+                       freqs.flags = 0;
+
+                       cpufreq_freq_transition_begin(policy, &freqs);
                        powernow_k6_target(policy, i);
+                       cpufreq_freq_transition_end(policy, &freqs, 0);
+                       break;
+               }
        }
        return 0;
 }
index f911645c3f6db59e18f32b68c06bfc890283c029..e61e224475ad457fd71b6934b2a5256a2d96fcc0 100644 (file)
@@ -269,8 +269,6 @@ static int powernow_target(struct cpufreq_policy *policy, unsigned int index)
 
        freqs.new = powernow_table[index].frequency;
 
-       cpufreq_freq_transition_begin(policy, &freqs);
-
        /* Now do the magic poking into the MSRs.  */
 
        if (have_a0 == 1)       /* A0 errata 5 */
@@ -290,8 +288,6 @@ static int powernow_target(struct cpufreq_policy *policy, unsigned int index)
        if (have_a0 == 1)
                local_irq_enable();
 
-       cpufreq_freq_transition_end(policy, &freqs, 0);
-
        return 0;
 }
 
index 9edccc63245df25d93c22adfb6d6d608b069dfb2..af4968813e76b433acbbda0d3e6f4f28d6476e43 100644 (file)
@@ -29,6 +29,7 @@
 
 #include <asm/cputhreads.h>
 #include <asm/reg.h>
+#include <asm/smp.h> /* Required for cpu_sibling_mask() in UP configs */
 
 #define POWERNV_MAX_PSTATES    256
 
index b7e677be1df034cdebdea6580b5726bc77c9aa39..0af618abebafa4b44b323d1811c1f885a52e0beb 100644 (file)
@@ -138,6 +138,7 @@ static int corenet_cpufreq_cpu_init(struct cpufreq_policy *policy)
        struct cpufreq_frequency_table *table;
        struct cpu_data *data;
        unsigned int cpu = policy->cpu;
+       u64 transition_latency_hz;
 
        np = of_get_cpu_node(cpu, NULL);
        if (!np)
@@ -205,8 +206,10 @@ static int corenet_cpufreq_cpu_init(struct cpufreq_policy *policy)
        for_each_cpu(i, per_cpu(cpu_mask, cpu))
                per_cpu(cpu_data, i) = data;
 
+       transition_latency_hz = 12ULL * NSEC_PER_SEC;
        policy->cpuinfo.transition_latency =
-                               (12 * NSEC_PER_SEC) / fsl_get_sys_freq();
+               do_div(transition_latency_hz, fsl_get_sys_freq());
+
        of_node_put(np);
 
        return 0;
index 8d045afa7fb406445b4996334e22a13e9f9572d8..6f9dfa80563a344249ef153aa4f92b17b35ce158 100644 (file)
@@ -60,9 +60,7 @@ static int __init ucv2_cpu_init(struct cpufreq_policy *policy)
        policy->max = policy->cpuinfo.max_freq = 1000000;
        policy->cpuinfo.transition_latency = CPUFREQ_ETERNAL;
        policy->clk = clk_get(NULL, "MAIN_CLK");
-       if (IS_ERR(policy->clk))
-               return PTR_ERR(policy->clk);
-       return 0;
+       return PTR_ERR_OR_ZERO(policy->clk);
 }
 
 static struct cpufreq_driver ucv2_driver = {
index ba06d1d2f99e39c50e5ed82f08fffdbbe20b7720..5c5863842de92bdd653fb0f24fdea96228c9908a 100644 (file)
@@ -197,7 +197,7 @@ config AMCC_PPC440SPE_ADMA
 
 config TIMB_DMA
        tristate "Timberdale FPGA DMA support"
-       depends on MFD_TIMBERDALE || HAS_IOMEM
+       depends on MFD_TIMBERDALE
        select DMA_ENGINE
        help
          Enable support for the Timberdale FPGA DMA engine.
index cd04eb7b182e338994f03f8f1d1f84c28d49fc40..926360c2db6abcb3b448e815246e182fe1a7d3ae 100644 (file)
@@ -182,11 +182,13 @@ static void edma_execute(struct edma_chan *echan)
                                  echan->ecc->dummy_slot);
        }
 
-       edma_resume(echan->ch_num);
-
        if (edesc->processed <= MAX_NR_SG) {
                dev_dbg(dev, "first transfer starting %d\n", echan->ch_num);
                edma_start(echan->ch_num);
+       } else {
+               dev_dbg(dev, "chan: %d: completed %d elements, resuming\n",
+                       echan->ch_num, edesc->processed);
+               edma_resume(echan->ch_num);
        }
 
        /*
index 381e793184baefdbda1806544d855a02487cda6a..b396a7fb53abb5df611e4fbd551000758f876100 100644 (file)
@@ -968,7 +968,17 @@ static struct platform_driver fsl_edma_driver = {
        .remove         = fsl_edma_remove,
 };
 
-module_platform_driver(fsl_edma_driver);
+static int __init fsl_edma_init(void)
+{
+       return platform_driver_register(&fsl_edma_driver);
+}
+subsys_initcall(fsl_edma_init);
+
+static void __exit fsl_edma_exit(void)
+{
+       platform_driver_unregister(&fsl_edma_driver);
+}
+module_exit(fsl_edma_exit);
 
 MODULE_ALIAS("platform:fsl-edma");
 MODULE_DESCRIPTION("Freescale eDMA engine driver");
index a1bd8298d55f1973313b969bf560c76967212dc4..03f7820fa333b89a445dc3831386ba371b93a1e1 100644 (file)
@@ -666,7 +666,7 @@ static struct dma_chan *of_dma_sirfsoc_xlate(struct of_phandle_args *dma_spec,
        struct sirfsoc_dma *sdma = ofdma->of_dma_data;
        unsigned int request = dma_spec->args[0];
 
-       if (request > SIRFSOC_DMA_CHANNELS)
+       if (request >= SIRFSOC_DMA_CHANNELS)
                return NULL;
 
        return dma_get_slave_channel(&sdma->channels[request].chan);
index bf0f8b476696eeade9ff5b7947dc64628181a7ca..401add28933f4f6f39c70b626e081ec39aef554b 100644 (file)
@@ -233,7 +233,7 @@ static void acpi_gpiochip_request_interrupts(struct acpi_gpio_chip *acpi_gpio)
 {
        struct gpio_chip *chip = acpi_gpio->chip;
 
-       if (!chip->dev || !chip->to_irq)
+       if (!chip->to_irq)
                return;
 
        INIT_LIST_HEAD(&acpi_gpio->events);
@@ -253,7 +253,7 @@ static void acpi_gpiochip_free_interrupts(struct acpi_gpio_chip *acpi_gpio)
        struct acpi_gpio_event *event, *ep;
        struct gpio_chip *chip = acpi_gpio->chip;
 
-       if (!chip->dev || !chip->to_irq)
+       if (!chip->to_irq)
                return;
 
        list_for_each_entry_safe_reverse(event, ep, &acpi_gpio->events, node) {
@@ -451,7 +451,7 @@ acpi_gpio_adr_space_handler(u32 function, acpi_physical_address address,
                if (function == ACPI_WRITE)
                        gpiod_set_raw_value(desc, !!((1 << i) & *value));
                else
-                       *value |= gpiod_get_raw_value(desc) << i;
+                       *value |= (u64)gpiod_get_raw_value(desc) << i;
        }
 
 out:
@@ -501,6 +501,9 @@ void acpi_gpiochip_add(struct gpio_chip *chip)
        acpi_handle handle;
        acpi_status status;
 
+       if (!chip || !chip->dev)
+               return;
+
        handle = ACPI_HANDLE(chip->dev);
        if (!handle)
                return;
@@ -531,6 +534,9 @@ void acpi_gpiochip_remove(struct gpio_chip *chip)
        acpi_handle handle;
        acpi_status status;
 
+       if (!chip || !chip->dev)
+               return;
+
        handle = ACPI_HANDLE(chip->dev);
        if (!handle)
                return;
index 761013f8b82f5a3d7c534f201a45f2c5902cd188..f48817d974802c3ec771814bef84bc97ab6039b5 100644 (file)
@@ -1387,8 +1387,8 @@ static int gpiochip_irq_map(struct irq_domain *d, unsigned int irq,
 {
        struct gpio_chip *chip = d->host_data;
 
-       irq_set_chip_and_handler(irq, chip->irqchip, chip->irq_handler);
        irq_set_chip_data(irq, chip);
+       irq_set_chip_and_handler(irq, chip->irqchip, chip->irq_handler);
 #ifdef CONFIG_ARM
        set_irq_flags(irq, IRQF_VALID);
 #else
index e930d4fe29c71c2d82d2317bb6681ac8d3f39d6a..1ef5ab9c9d519d175b202dbf01cba243a870e1f7 100644 (file)
@@ -145,6 +145,7 @@ exynos_drm_crtc_mode_set(struct drm_crtc *crtc, struct drm_display_mode *mode,
 
        plane->crtc = crtc;
        plane->fb = crtc->primary->fb;
+       drm_framebuffer_reference(plane->fb);
 
        return 0;
 }
index c786cd4f457bb8893fc4f02e8e27338832b1bc58..2a3ad24276f8380415940d00b87708f9cbe0d9e2 100644 (file)
@@ -263,7 +263,7 @@ struct drm_gem_object *exynos_dmabuf_prime_import(struct drm_device *drm_dev,
        buffer->sgt = sgt;
        exynos_gem_obj->base.import_attach = attach;
 
-       DRM_DEBUG_PRIME("dma_addr = 0x%x, size = 0x%lx\n", buffer->dma_addr,
+       DRM_DEBUG_PRIME("dma_addr = %pad, size = 0x%lx\n", &buffer->dma_addr,
                                                                buffer->size);
 
        return &exynos_gem_obj->base;
index eb73e3bf2a0cbe6e56f9ae6b5d81b188038f05e3..4ac438187568ed4a6894436433e404c94b90a7a2 100644 (file)
@@ -1426,9 +1426,9 @@ static int exynos_dsi_probe(struct platform_device *pdev)
 
        res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
        dsi->reg_base = devm_ioremap_resource(&pdev->dev, res);
-       if (!dsi->reg_base) {
+       if (IS_ERR(dsi->reg_base)) {
                dev_err(&pdev->dev, "failed to remap io region\n");
-               return -EADDRNOTAVAIL;
+               return PTR_ERR(dsi->reg_base);
        }
 
        dsi->phy = devm_phy_get(&pdev->dev, "dsim");
index 7afead9c3f30258b7869e69e0f2015d9c2921fca..852f2dadaebdbbe3a385b5fe28cd65a560f0c210 100644 (file)
@@ -220,7 +220,7 @@ static void vidi_win_commit(struct exynos_drm_manager *mgr, int zpos)
 
        win_data->enabled = true;
 
-       DRM_DEBUG_KMS("dma_addr = 0x%x\n", win_data->dma_addr);
+       DRM_DEBUG_KMS("dma_addr = %pad\n", &win_data->dma_addr);
 
        if (ctx->vblank_on)
                schedule_work(&ctx->work);
index ab5e93c30aa2bde43b1ed892f0437b7b5d0c8289..62a5c3627b90eaea4e6620cb829af572781e3d94 100644 (file)
@@ -50,7 +50,7 @@ bool intel_enable_ppgtt(struct drm_device *dev, bool full)
 
        /* Full ppgtt disabled by default for now due to issues. */
        if (full)
-               return false; /* HAS_PPGTT(dev) */
+               return HAS_PPGTT(dev) && (i915.enable_ppgtt == 2);
        else
                return HAS_ALIASING_PPGTT(dev);
 }
index 7753249b3a959cce7f31b8c9cf1ba0b36c18dce8..f98ba4e6e70b940c150c504782fc943abee1544a 100644 (file)
@@ -1362,10 +1362,20 @@ static inline void intel_hpd_irq_handler(struct drm_device *dev,
        spin_lock(&dev_priv->irq_lock);
        for (i = 1; i < HPD_NUM_PINS; i++) {
 
-               WARN_ONCE(hpd[i] & hotplug_trigger &&
-                         dev_priv->hpd_stats[i].hpd_mark == HPD_DISABLED,
-                         "Received HPD interrupt (0x%08x) on pin %d (0x%08x) although disabled\n",
-                         hotplug_trigger, i, hpd[i]);
+               if (hpd[i] & hotplug_trigger &&
+                   dev_priv->hpd_stats[i].hpd_mark == HPD_DISABLED) {
+                       /*
+                        * On GMCH platforms the interrupt mask bits only
+                        * prevent irq generation, not the setting of the
+                        * hotplug bits itself. So only WARN about unexpected
+                        * interrupts on saner platforms.
+                        */
+                       WARN_ONCE(INTEL_INFO(dev)->gen >= 5 && !IS_VALLEYVIEW(dev),
+                                 "Received HPD interrupt (0x%08x) on pin %d (0x%08x) although disabled\n",
+                                 hotplug_trigger, i, hpd[i]);
+
+                       continue;
+               }
 
                if (!(hpd[i] & hotplug_trigger) ||
                    dev_priv->hpd_stats[i].hpd_mark != HPD_ENABLED)
index 9f5b18d9d8850e886eeb44ca68b0bd17af4c3f30..c77af69c2d8f5f8c97f5dc2045fd99a07960617b 100644 (file)
@@ -827,6 +827,7 @@ enum punit_power_well {
 # define MI_FLUSH_ENABLE                               (1 << 12)
 # define ASYNC_FLIP_PERF_DISABLE                       (1 << 14)
 # define MODE_IDLE                                     (1 << 9)
+# define STOP_RING                                     (1 << 8)
 
 #define GEN6_GT_MODE   0x20d0
 #define GEN7_GT_MODE   0x7008
index dae976f51d83357a51637fca61bd830bb9753010..69bcc42a0e44327679217d29a9415e553bd3564b 100644 (file)
@@ -9654,11 +9654,22 @@ intel_pipe_config_compare(struct drm_device *dev,
        PIPE_CONF_CHECK_I(pipe_src_w);
        PIPE_CONF_CHECK_I(pipe_src_h);
 
-       PIPE_CONF_CHECK_I(gmch_pfit.control);
-       /* pfit ratios are autocomputed by the hw on gen4+ */
-       if (INTEL_INFO(dev)->gen < 4)
-               PIPE_CONF_CHECK_I(gmch_pfit.pgm_ratios);
-       PIPE_CONF_CHECK_I(gmch_pfit.lvds_border_bits);
+       /*
+        * FIXME: BIOS likes to set up a cloned config with lvds+external
+        * screen. Since we don't yet re-compute the pipe config when moving
+        * just the lvds port away to another pipe the sw tracking won't match.
+        *
+        * Proper atomic modesets with recomputed global state will fix this.
+        * Until then just don't check gmch state for inherited modes.
+        */
+       if (!PIPE_CONF_QUIRK(PIPE_CONFIG_QUIRK_INHERITED_MODE)) {
+               PIPE_CONF_CHECK_I(gmch_pfit.control);
+               /* pfit ratios are autocomputed by the hw on gen4+ */
+               if (INTEL_INFO(dev)->gen < 4)
+                       PIPE_CONF_CHECK_I(gmch_pfit.pgm_ratios);
+               PIPE_CONF_CHECK_I(gmch_pfit.lvds_border_bits);
+       }
+
        PIPE_CONF_CHECK_I(pch_pfit.enabled);
        if (current_config->pch_pfit.enabled) {
                PIPE_CONF_CHECK_I(pch_pfit.pos);
@@ -11616,6 +11627,8 @@ static void intel_modeset_readout_hw_state(struct drm_device *dev)
                            base.head) {
                memset(&crtc->config, 0, sizeof(crtc->config));
 
+               crtc->config.quirks |= PIPE_CONFIG_QUIRK_INHERITED_MODE;
+
                crtc->active = dev_priv->display.get_pipe_config(crtc,
                                                                 &crtc->config);
 
index d2a55884ad523b8a54546bfbbe0565889e73261e..dfa85289f28f301fe259b522b45ef1521489175e 100644 (file)
@@ -3619,7 +3619,8 @@ static bool intel_edp_init_connector(struct intel_dp *intel_dp,
 {
        struct drm_connector *connector = &intel_connector->base;
        struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
-       struct drm_device *dev = intel_dig_port->base.base.dev;
+       struct intel_encoder *intel_encoder = &intel_dig_port->base;
+       struct drm_device *dev = intel_encoder->base.dev;
        struct drm_i915_private *dev_priv = dev->dev_private;
        struct drm_display_mode *fixed_mode = NULL;
        bool has_dpcd;
@@ -3629,6 +3630,14 @@ static bool intel_edp_init_connector(struct intel_dp *intel_dp,
        if (!is_edp(intel_dp))
                return true;
 
+       /* The VDD bit needs a power domain reference, so if the bit is already
+        * enabled when we boot, grab this reference. */
+       if (edp_have_panel_vdd(intel_dp)) {
+               enum intel_display_power_domain power_domain;
+               power_domain = intel_display_port_power_domain(intel_encoder);
+               intel_display_power_get(dev_priv, power_domain);
+       }
+
        /* Cache DPCD and EDID for edp. */
        intel_edp_panel_vdd_on(intel_dp);
        has_dpcd = intel_dp_get_dpcd(intel_dp);
index 0542de98226018a9427519d0eb6996714f3c0582..328b1a70264b4c12a07400de7284b2968813aa94 100644 (file)
@@ -236,7 +236,8 @@ struct intel_crtc_config {
         * tracked with quirk flags so that fastboot and state checker can act
         * accordingly.
         */
-#define PIPE_CONFIG_QUIRK_MODE_SYNC_FLAGS (1<<0) /* unreliable sync mode.flags */
+#define PIPE_CONFIG_QUIRK_MODE_SYNC_FLAGS      (1<<0) /* unreliable sync mode.flags */
+#define PIPE_CONFIG_QUIRK_INHERITED_MODE       (1<<1) /* mode inherited from firmware */
        unsigned long quirks;
 
        /* User requested mode, only valid as a starting point to
index b4d44e62f0c769746a538f70afdf9916c95f6bd8..fce4a0d93c0b19b7d51f4d2578331b13aef51399 100644 (file)
@@ -132,6 +132,16 @@ static int intelfb_create(struct drm_fb_helper *helper,
 
        mutex_lock(&dev->struct_mutex);
 
+       if (intel_fb &&
+           (sizes->fb_width > intel_fb->base.width ||
+            sizes->fb_height > intel_fb->base.height)) {
+               DRM_DEBUG_KMS("BIOS fb too small (%dx%d), we require (%dx%d),"
+                             " releasing it\n",
+                             intel_fb->base.width, intel_fb->base.height,
+                             sizes->fb_width, sizes->fb_height);
+               drm_framebuffer_unreference(&intel_fb->base);
+               intel_fb = ifbdev->fb = NULL;
+       }
        if (!intel_fb || WARN_ON(!intel_fb->obj)) {
                DRM_DEBUG_KMS("no BIOS fb, allocating a new one\n");
                ret = intelfb_alloc(helper, sizes);
index b0413e190625b26c0552e8a737479f52b3dd5c9d..157267aa356165b7b7fcd94c3318a79f04f2bb8c 100644 (file)
@@ -821,11 +821,11 @@ static void intel_disable_hdmi(struct intel_encoder *encoder)
        }
 }
 
-static int hdmi_portclock_limit(struct intel_hdmi *hdmi)
+static int hdmi_portclock_limit(struct intel_hdmi *hdmi, bool respect_dvi_limit)
 {
        struct drm_device *dev = intel_hdmi_to_dev(hdmi);
 
-       if (!hdmi->has_hdmi_sink || IS_G4X(dev))
+       if ((respect_dvi_limit && !hdmi->has_hdmi_sink) || IS_G4X(dev))
                return 165000;
        else if (IS_HASWELL(dev) || INTEL_INFO(dev)->gen >= 8)
                return 300000;
@@ -837,7 +837,8 @@ static enum drm_mode_status
 intel_hdmi_mode_valid(struct drm_connector *connector,
                      struct drm_display_mode *mode)
 {
-       if (mode->clock > hdmi_portclock_limit(intel_attached_hdmi(connector)))
+       if (mode->clock > hdmi_portclock_limit(intel_attached_hdmi(connector),
+                                              true))
                return MODE_CLOCK_HIGH;
        if (mode->clock < 20000)
                return MODE_CLOCK_LOW;
@@ -879,7 +880,7 @@ bool intel_hdmi_compute_config(struct intel_encoder *encoder,
        struct drm_device *dev = encoder->base.dev;
        struct drm_display_mode *adjusted_mode = &pipe_config->adjusted_mode;
        int clock_12bpc = pipe_config->adjusted_mode.crtc_clock * 3 / 2;
-       int portclock_limit = hdmi_portclock_limit(intel_hdmi);
+       int portclock_limit = hdmi_portclock_limit(intel_hdmi, false);
        int desired_bpp;
 
        if (intel_hdmi->color_range_auto) {
index 6bc68bdcf433cf06a68d52d95b063f9a42795194..79fb4cc2137c19d2cf4ad5dc23e6c5883eb58e4d 100644 (file)
@@ -437,32 +437,41 @@ static void ring_setup_phys_status_page(struct intel_ring_buffer *ring)
        I915_WRITE(HWS_PGA, addr);
 }
 
-static int init_ring_common(struct intel_ring_buffer *ring)
+static bool stop_ring(struct intel_ring_buffer *ring)
 {
-       struct drm_device *dev = ring->dev;
-       struct drm_i915_private *dev_priv = dev->dev_private;
-       struct drm_i915_gem_object *obj = ring->obj;
-       int ret = 0;
-       u32 head;
+       struct drm_i915_private *dev_priv = to_i915(ring->dev);
 
-       gen6_gt_force_wake_get(dev_priv, FORCEWAKE_ALL);
+       if (!IS_GEN2(ring->dev)) {
+               I915_WRITE_MODE(ring, _MASKED_BIT_ENABLE(STOP_RING));
+               if (wait_for_atomic((I915_READ_MODE(ring) & MODE_IDLE) != 0, 1000)) {
+                       DRM_ERROR("%s :timed out trying to stop ring\n", ring->name);
+                       return false;
+               }
+       }
 
-       /* Stop the ring if it's running. */
        I915_WRITE_CTL(ring, 0);
        I915_WRITE_HEAD(ring, 0);
        ring->write_tail(ring, 0);
-       if (wait_for_atomic((I915_READ_MODE(ring) & MODE_IDLE) != 0, 1000))
-               DRM_ERROR("%s :timed out trying to stop ring\n", ring->name);
 
-       if (I915_NEED_GFX_HWS(dev))
-               intel_ring_setup_status_page(ring);
-       else
-               ring_setup_phys_status_page(ring);
+       if (!IS_GEN2(ring->dev)) {
+               (void)I915_READ_CTL(ring);
+               I915_WRITE_MODE(ring, _MASKED_BIT_DISABLE(STOP_RING));
+       }
 
-       head = I915_READ_HEAD(ring) & HEAD_ADDR;
+       return (I915_READ_HEAD(ring) & HEAD_ADDR) == 0;
+}
 
-       /* G45 ring initialization fails to reset head to zero */
-       if (head != 0) {
+static int init_ring_common(struct intel_ring_buffer *ring)
+{
+       struct drm_device *dev = ring->dev;
+       struct drm_i915_private *dev_priv = dev->dev_private;
+       struct drm_i915_gem_object *obj = ring->obj;
+       int ret = 0;
+
+       gen6_gt_force_wake_get(dev_priv, FORCEWAKE_ALL);
+
+       if (!stop_ring(ring)) {
+               /* G45 ring initialization often fails to reset head to zero */
                DRM_DEBUG_KMS("%s head not reset to zero "
                              "ctl %08x head %08x tail %08x start %08x\n",
                              ring->name,
@@ -471,9 +480,7 @@ static int init_ring_common(struct intel_ring_buffer *ring)
                              I915_READ_TAIL(ring),
                              I915_READ_START(ring));
 
-               I915_WRITE_HEAD(ring, 0);
-
-               if (I915_READ_HEAD(ring) & HEAD_ADDR) {
+               if (!stop_ring(ring)) {
                        DRM_ERROR("failed to set %s head to zero "
                                  "ctl %08x head %08x tail %08x start %08x\n",
                                  ring->name,
@@ -481,9 +488,16 @@ static int init_ring_common(struct intel_ring_buffer *ring)
                                  I915_READ_HEAD(ring),
                                  I915_READ_TAIL(ring),
                                  I915_READ_START(ring));
+                       ret = -EIO;
+                       goto out;
                }
        }
 
+       if (I915_NEED_GFX_HWS(dev))
+               intel_ring_setup_status_page(ring);
+       else
+               ring_setup_phys_status_page(ring);
+
        /* Initialize the ring. This must happen _after_ we've cleared the ring
         * registers with the above sequence (the readback of the HEAD registers
         * also enforces ordering), otherwise the hw might lose the new ring
index 270a6a9734387b6079e9348e17773214a293ad0a..2b91c4b4d34b2efd6029bb52f46ece14eb2e515f 100644 (file)
@@ -34,6 +34,7 @@ struct  intel_hw_status_page {
 #define I915_WRITE_IMR(ring, val) I915_WRITE(RING_IMR((ring)->mmio_base), val)
 
 #define I915_READ_MODE(ring) I915_READ(RING_MI_MODE((ring)->mmio_base))
+#define I915_WRITE_MODE(ring, val) I915_WRITE(RING_MI_MODE((ring)->mmio_base), val)
 
 enum intel_ring_hangcheck_action {
        HANGCHECK_IDLE = 0,
index 3e6c0f3ed592a6b746610b9b6dd1986e50e1ace7..ef9957dbac943bdda6a1e6fd9bca142ebb202cef 100644 (file)
@@ -510,9 +510,8 @@ static void update_cursor(struct drm_crtc *crtc)
                                        MDP4_DMA_CURSOR_BLEND_CONFIG_CURSOR_EN);
                } else {
                        /* disable cursor: */
-                       mdp4_write(mdp4_kms, REG_MDP4_DMA_CURSOR_BASE(dma), 0);
-                       mdp4_write(mdp4_kms, REG_MDP4_DMA_CURSOR_BLEND_CONFIG(dma),
-                                       MDP4_DMA_CURSOR_BLEND_CONFIG_FORMAT(CURSOR_ARGB));
+                       mdp4_write(mdp4_kms, REG_MDP4_DMA_CURSOR_BASE(dma),
+                                       mdp4_kms->blank_cursor_iova);
                }
 
                /* and drop the iova ref + obj rev when done scanning out: */
@@ -574,11 +573,9 @@ static int mdp4_crtc_cursor_set(struct drm_crtc *crtc,
 
        if (old_bo) {
                /* drop our previous reference: */
-               msm_gem_put_iova(old_bo, mdp4_kms->id);
-               drm_gem_object_unreference_unlocked(old_bo);
+               drm_flip_work_queue(&mdp4_crtc->unref_cursor_work, old_bo);
        }
 
-       crtc_flush(crtc);
        request_pending(crtc, PENDING_CURSOR);
 
        return 0;
index c740ccd1cc67fb4d1a08ef6a65efc671daec8595..8edd531cb62166ad1291be18ffc26ba033cbc71d 100644 (file)
@@ -70,12 +70,12 @@ irqreturn_t mdp4_irq(struct msm_kms *kms)
 
        VERB("status=%08x", status);
 
+       mdp_dispatch_irqs(mdp_kms, status);
+
        for (id = 0; id < priv->num_crtcs; id++)
                if (status & mdp4_crtc_vblank(priv->crtcs[id]))
                        drm_handle_vblank(dev, id);
 
-       mdp_dispatch_irqs(mdp_kms, status);
-
        return IRQ_HANDLED;
 }
 
index 272e707c948704e6ff36cc8df263723fdbbd2a71..0bb4faa17523e0862d7f32df0939976424e82d6a 100644 (file)
@@ -144,6 +144,10 @@ static void mdp4_preclose(struct msm_kms *kms, struct drm_file *file)
 static void mdp4_destroy(struct msm_kms *kms)
 {
        struct mdp4_kms *mdp4_kms = to_mdp4_kms(to_mdp_kms(kms));
+       if (mdp4_kms->blank_cursor_iova)
+               msm_gem_put_iova(mdp4_kms->blank_cursor_bo, mdp4_kms->id);
+       if (mdp4_kms->blank_cursor_bo)
+               drm_gem_object_unreference(mdp4_kms->blank_cursor_bo);
        kfree(mdp4_kms);
 }
 
@@ -372,6 +376,23 @@ struct msm_kms *mdp4_kms_init(struct drm_device *dev)
                goto fail;
        }
 
+       mutex_lock(&dev->struct_mutex);
+       mdp4_kms->blank_cursor_bo = msm_gem_new(dev, SZ_16K, MSM_BO_WC);
+       mutex_unlock(&dev->struct_mutex);
+       if (IS_ERR(mdp4_kms->blank_cursor_bo)) {
+               ret = PTR_ERR(mdp4_kms->blank_cursor_bo);
+               dev_err(dev->dev, "could not allocate blank-cursor bo: %d\n", ret);
+               mdp4_kms->blank_cursor_bo = NULL;
+               goto fail;
+       }
+
+       ret = msm_gem_get_iova(mdp4_kms->blank_cursor_bo, mdp4_kms->id,
+                       &mdp4_kms->blank_cursor_iova);
+       if (ret) {
+               dev_err(dev->dev, "could not pin blank-cursor bo: %d\n", ret);
+               goto fail;
+       }
+
        return kms;
 
 fail:
index 66a4d31aec80e010e5f5914705f05f7f346dc68c..715520c54cdec48f93750da0843213878aaabf86 100644 (file)
@@ -44,6 +44,10 @@ struct mdp4_kms {
        struct clk *lut_clk;
 
        struct mdp_irq error_handler;
+
+       /* empty/blank cursor bo to use when cursor is "disabled" */
+       struct drm_gem_object *blank_cursor_bo;
+       uint32_t blank_cursor_iova;
 };
 #define to_mdp4_kms(x) container_of(x, struct mdp4_kms, base)
 
index 353d494a497f22c96e0f51d2049cd765ad595f07..f2b985bc2adf41f8330dd4bfb8dcafdb30b43c53 100644 (file)
@@ -71,11 +71,11 @@ static void mdp5_irq_mdp(struct mdp_kms *mdp_kms)
 
        VERB("status=%08x", status);
 
+       mdp_dispatch_irqs(mdp_kms, status);
+
        for (id = 0; id < priv->num_crtcs; id++)
                if (status & mdp5_crtc_vblank(priv->crtcs[id]))
                        drm_handle_vblank(dev, id);
-
-       mdp_dispatch_irqs(mdp_kms, status);
 }
 
 irqreturn_t mdp5_irq(struct msm_kms *kms)
index 6c6d7d4c9b4e77848994222f9bd5bf2b26b6a043..a752ab83b8104124a232d3e6701846c661fabfa3 100644 (file)
@@ -62,11 +62,8 @@ static int msm_fbdev_create(struct drm_fb_helper *helper,
        dma_addr_t paddr;
        int ret, size;
 
-       /* only doing ARGB32 since this is what is needed to alpha-blend
-        * with video overlays:
-        */
        sizes->surface_bpp = 32;
-       sizes->surface_depth = 32;
+       sizes->surface_depth = 24;
 
        DBG("create fbdev: %dx%d@%d (%dx%d)", sizes->surface_width,
                        sizes->surface_height, sizes->surface_bpp,
index 3da8264d3039017bd358ccaca48197356f932f68..bb8026daebc9426759d2bb31f2a6360dfed606a3 100644 (file)
@@ -118,8 +118,10 @@ static void put_pages(struct drm_gem_object *obj)
 
                if (iommu_present(&platform_bus_type))
                        drm_gem_put_pages(obj, msm_obj->pages, true, false);
-               else
+               else {
                        drm_mm_remove_node(msm_obj->vram_node);
+                       drm_free_large(msm_obj->pages);
+               }
 
                msm_obj->pages = NULL;
        }
index 15936524f226ca46b9631cadf43022aa2cc81d31..bc0119fb6c12a9373e1bd282886d38f8ed858135 100644 (file)
@@ -209,6 +209,7 @@ void radeon_dp_aux_init(struct radeon_connector *radeon_connector)
 {
        int ret;
 
+       radeon_connector->ddc_bus->rec.hpd = radeon_connector->hpd.hpd;
        radeon_connector->ddc_bus->aux.dev = radeon_connector->base.kdev;
        radeon_connector->ddc_bus->aux.transfer = radeon_dp_aux_transfer;
        ret = drm_dp_aux_register_i2c_bus(&radeon_connector->ddc_bus->aux);
index 89b4afa5041c322a15afc67bfbba7f7cf2cef8d6..f7e46cf682afdcbe051cb4a6ecf6dfe66570a5c3 100644 (file)
@@ -597,7 +597,7 @@ int cik_sdma_ring_test(struct radeon_device *rdev,
        tmp = 0xCAFEDEAD;
        writel(tmp, ptr);
 
-       r = radeon_ring_lock(rdev, ring, 4);
+       r = radeon_ring_lock(rdev, ring, 5);
        if (r) {
                DRM_ERROR("radeon: dma failed to lock ring %d (%d).\n", ring->idx, r);
                return r;
index cbf7e3269f84882d1352a44ea402fa5dc4cbba7c..9c61b74ef4415cbf1f7bce501b268662d80da8de 100644 (file)
@@ -158,16 +158,18 @@ u32 r600_dpm_get_vblank_time(struct radeon_device *rdev)
        u32 line_time_us, vblank_lines;
        u32 vblank_time_us = 0xffffffff; /* if the displays are off, vblank time is max */
 
-       list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
-               radeon_crtc = to_radeon_crtc(crtc);
-               if (crtc->enabled && radeon_crtc->enabled && radeon_crtc->hw_mode.clock) {
-                       line_time_us = (radeon_crtc->hw_mode.crtc_htotal * 1000) /
-                               radeon_crtc->hw_mode.clock;
-                       vblank_lines = radeon_crtc->hw_mode.crtc_vblank_end -
-                               radeon_crtc->hw_mode.crtc_vdisplay +
-                               (radeon_crtc->v_border * 2);
-                       vblank_time_us = vblank_lines * line_time_us;
-                       break;
+       if (rdev->num_crtc && rdev->mode_info.mode_config_initialized) {
+               list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
+                       radeon_crtc = to_radeon_crtc(crtc);
+                       if (crtc->enabled && radeon_crtc->enabled && radeon_crtc->hw_mode.clock) {
+                               line_time_us = (radeon_crtc->hw_mode.crtc_htotal * 1000) /
+                                       radeon_crtc->hw_mode.clock;
+                               vblank_lines = radeon_crtc->hw_mode.crtc_vblank_end -
+                                       radeon_crtc->hw_mode.crtc_vdisplay +
+                                       (radeon_crtc->v_border * 2);
+                               vblank_time_us = vblank_lines * line_time_us;
+                               break;
+                       }
                }
        }
 
@@ -181,14 +183,15 @@ u32 r600_dpm_get_vrefresh(struct radeon_device *rdev)
        struct radeon_crtc *radeon_crtc;
        u32 vrefresh = 0;
 
-       list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
-               radeon_crtc = to_radeon_crtc(crtc);
-               if (crtc->enabled && radeon_crtc->enabled && radeon_crtc->hw_mode.clock) {
-                       vrefresh = radeon_crtc->hw_mode.vrefresh;
-                       break;
+       if (rdev->num_crtc && rdev->mode_info.mode_config_initialized) {
+               list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
+                       radeon_crtc = to_radeon_crtc(crtc);
+                       if (crtc->enabled && radeon_crtc->enabled && radeon_crtc->hw_mode.clock) {
+                               vrefresh = radeon_crtc->hw_mode.vrefresh;
+                               break;
+                       }
                }
        }
-
        return vrefresh;
 }
 
index dedea72f48c45b95022ec9bcb60cf85e8073e187..a9fb0d016d387683a1d4bb96f77c046b7cd804b7 100644 (file)
@@ -528,6 +528,13 @@ static bool radeon_atpx_detect(void)
                has_atpx |= (radeon_atpx_pci_probe_handle(pdev) == true);
        }
 
+       /* some newer PX laptops mark the dGPU as a non-VGA display device */
+       while ((pdev = pci_get_class(PCI_CLASS_DISPLAY_OTHER << 8, pdev)) != NULL) {
+               vga_count++;
+
+               has_atpx |= (radeon_atpx_pci_probe_handle(pdev) == true);
+       }
+
        if (has_atpx && vga_count == 2) {
                acpi_get_name(radeon_atpx_priv.atpx.handle, ACPI_FULL_PATHNAME, &buffer);
                printk(KERN_INFO "VGA switcheroo: detected switching method %s handle\n",
index 2f7cbb901fb18c04c30c48e7357c23d01edc2529..8d99d5ee8014c4f23e031a31cbeab85bde33d81a 100644 (file)
@@ -839,6 +839,38 @@ static void avivo_reduce_ratio(unsigned *nom, unsigned *den,
        }
 }
 
+/**
+ * avivo_get_fb_ref_div - feedback and ref divider calculation
+ *
+ * @nom: nominator
+ * @den: denominator
+ * @post_div: post divider
+ * @fb_div_max: feedback divider maximum
+ * @ref_div_max: reference divider maximum
+ * @fb_div: resulting feedback divider
+ * @ref_div: resulting reference divider
+ *
+ * Calculate feedback and reference divider for a given post divider. Makes
+ * sure we stay within the limits.
+ */
+static void avivo_get_fb_ref_div(unsigned nom, unsigned den, unsigned post_div,
+                                unsigned fb_div_max, unsigned ref_div_max,
+                                unsigned *fb_div, unsigned *ref_div)
+{
+       /* limit reference * post divider to a maximum */
+       ref_div_max = min(210 / post_div, ref_div_max);
+
+       /* get matching reference and feedback divider */
+       *ref_div = min(max(DIV_ROUND_CLOSEST(den, post_div), 1u), ref_div_max);
+       *fb_div = DIV_ROUND_CLOSEST(nom * *ref_div * post_div, den);
+
+       /* limit fb divider to its maximum */
+        if (*fb_div > fb_div_max) {
+               *ref_div = DIV_ROUND_CLOSEST(*ref_div * fb_div_max, *fb_div);
+               *fb_div = fb_div_max;
+       }
+}
+
 /**
  * radeon_compute_pll_avivo - compute PLL paramaters
  *
@@ -860,6 +892,9 @@ void radeon_compute_pll_avivo(struct radeon_pll *pll,
                              u32 *ref_div_p,
                              u32 *post_div_p)
 {
+       unsigned target_clock = pll->flags & RADEON_PLL_USE_FRAC_FB_DIV ?
+               freq : freq / 10;
+
        unsigned fb_div_min, fb_div_max, fb_div;
        unsigned post_div_min, post_div_max, post_div;
        unsigned ref_div_min, ref_div_max, ref_div;
@@ -880,14 +915,18 @@ void radeon_compute_pll_avivo(struct radeon_pll *pll,
                ref_div_min = pll->reference_div;
        else
                ref_div_min = pll->min_ref_div;
-       ref_div_max = pll->max_ref_div;
+
+       if (pll->flags & RADEON_PLL_USE_FRAC_FB_DIV &&
+           pll->flags & RADEON_PLL_USE_REF_DIV)
+               ref_div_max = pll->reference_div;
+       else
+               ref_div_max = pll->max_ref_div;
 
        /* determine allowed post divider range */
        if (pll->flags & RADEON_PLL_USE_POST_DIV) {
                post_div_min = pll->post_div;
                post_div_max = pll->post_div;
        } else {
-               unsigned target_clock = freq / 10;
                unsigned vco_min, vco_max;
 
                if (pll->flags & RADEON_PLL_IS_LCD) {
@@ -898,6 +937,11 @@ void radeon_compute_pll_avivo(struct radeon_pll *pll,
                        vco_max = pll->pll_out_max;
                }
 
+               if (pll->flags & RADEON_PLL_USE_FRAC_FB_DIV) {
+                       vco_min *= 10;
+                       vco_max *= 10;
+               }
+
                post_div_min = vco_min / target_clock;
                if ((target_clock * post_div_min) < vco_min)
                        ++post_div_min;
@@ -912,7 +956,7 @@ void radeon_compute_pll_avivo(struct radeon_pll *pll,
        }
 
        /* represent the searched ratio as fractional number */
-       nom = pll->flags & RADEON_PLL_USE_FRAC_FB_DIV ? freq : freq / 10;
+       nom = target_clock;
        den = pll->reference_freq;
 
        /* reduce the numbers to a simpler ratio */
@@ -926,7 +970,12 @@ void radeon_compute_pll_avivo(struct radeon_pll *pll,
        diff_best = ~0;
 
        for (post_div = post_div_min; post_div <= post_div_max; ++post_div) {
-               unsigned diff = abs(den - den / post_div * post_div);
+               unsigned diff;
+               avivo_get_fb_ref_div(nom, den, post_div, fb_div_max,
+                                    ref_div_max, &fb_div, &ref_div);
+               diff = abs(target_clock - (pll->reference_freq * fb_div) /
+                       (ref_div * post_div));
+
                if (diff < diff_best || (diff == diff_best &&
                    !(pll->flags & RADEON_PLL_PREFER_MINM_OVER_MAXP))) {
 
@@ -936,28 +985,9 @@ void radeon_compute_pll_avivo(struct radeon_pll *pll,
        }
        post_div = post_div_best;
 
-       /* limit reference * post divider to a maximum */
-       ref_div_max = min(210 / post_div, ref_div_max);
-
-       /* get matching reference and feedback divider */
-       ref_div = max(DIV_ROUND_CLOSEST(den, post_div), 1u);
-       fb_div = DIV_ROUND_CLOSEST(nom * ref_div * post_div, den);
-
-       /* we're almost done, but reference and feedback
-          divider might be to large now */
-
-       nom = fb_div;
-       den = ref_div;
-
-        if (fb_div > fb_div_max) {
-               ref_div = DIV_ROUND_CLOSEST(den * fb_div_max, nom);
-               fb_div = fb_div_max;
-       }
-
-       if (ref_div > ref_div_max) {
-               ref_div = ref_div_max;
-               fb_div = DIV_ROUND_CLOSEST(nom * ref_div_max, den);
-       }
+       /* get the feedback and reference divider for the optimal value */
+       avivo_get_fb_ref_div(nom, den, post_div, fb_div_max, ref_div_max,
+                            &fb_div, &ref_div);
 
        /* reduce the numbers to a simpler ratio once more */
        /* this also makes sure that the reference divider is large enough */
@@ -979,7 +1009,7 @@ void radeon_compute_pll_avivo(struct radeon_pll *pll,
        *post_div_p = post_div;
 
        DRM_DEBUG_KMS("%d - %d, pll dividers - fb: %d.%d ref: %d, post %d\n",
-                     freq, *dot_clock_p, *fb_div_p, *frac_fb_div_p,
+                     freq, *dot_clock_p * 10, *fb_div_p, *frac_fb_div_p,
                      ref_div, post_div);
 }
 
index fb3d13f693ddb18fd32e8a4ef1a486305c615eed..0cc47f12d9957d916b41acf4d3874fa062313175 100644 (file)
@@ -107,11 +107,9 @@ int radeon_driver_load_kms(struct drm_device *dev, unsigned long flags)
                flags |= RADEON_IS_PCI;
        }
 
-       if (radeon_runtime_pm == 1)
-               flags |= RADEON_IS_PX;
-       else if ((radeon_runtime_pm == -1) &&
-                radeon_has_atpx() &&
-                ((flags & RADEON_IS_IGP) == 0))
+       if ((radeon_runtime_pm != 0) &&
+           radeon_has_atpx() &&
+           ((flags & RADEON_IS_IGP) == 0))
                flags |= RADEON_IS_PX;
 
        /* radeon_device_init should report only fatal error
index ee738a524639e41e75c7af5279c583e4b3c7ee10..6fac8efe8340e2e99f83965201bab056de2997de 100644 (file)
@@ -603,7 +603,6 @@ static const struct attribute_group *hwmon_groups[] = {
 static int radeon_hwmon_init(struct radeon_device *rdev)
 {
        int err = 0;
-       struct device *hwmon_dev;
 
        switch (rdev->pm.int_thermal_type) {
        case THERMAL_TYPE_RV6XX:
@@ -616,11 +615,11 @@ static int radeon_hwmon_init(struct radeon_device *rdev)
        case THERMAL_TYPE_KV:
                if (rdev->asic->pm.get_temperature == NULL)
                        return err;
-               hwmon_dev = hwmon_device_register_with_groups(rdev->dev,
-                                                             "radeon", rdev,
-                                                             hwmon_groups);
-               if (IS_ERR(hwmon_dev)) {
-                       err = PTR_ERR(hwmon_dev);
+               rdev->pm.int_hwmon_dev = hwmon_device_register_with_groups(rdev->dev,
+                                                                          "radeon", rdev,
+                                                                          hwmon_groups);
+               if (IS_ERR(rdev->pm.int_hwmon_dev)) {
+                       err = PTR_ERR(rdev->pm.int_hwmon_dev);
                        dev_err(rdev->dev,
                                "Unable to register hwmon device: %d\n", err);
                }
@@ -632,6 +631,12 @@ static int radeon_hwmon_init(struct radeon_device *rdev)
        return err;
 }
 
+static void radeon_hwmon_fini(struct radeon_device *rdev)
+{
+       if (rdev->pm.int_hwmon_dev)
+               hwmon_device_unregister(rdev->pm.int_hwmon_dev);
+}
+
 static void radeon_dpm_thermal_work_handler(struct work_struct *work)
 {
        struct radeon_device *rdev =
@@ -1257,6 +1262,7 @@ int radeon_pm_init(struct radeon_device *rdev)
        case CHIP_RV670:
        case CHIP_RS780:
        case CHIP_RS880:
+       case CHIP_RV770:
        case CHIP_BARTS:
        case CHIP_TURKS:
        case CHIP_CAICOS:
@@ -1273,7 +1279,6 @@ int radeon_pm_init(struct radeon_device *rdev)
                else
                        rdev->pm.pm_method = PM_METHOD_PROFILE;
                break;
-       case CHIP_RV770:
        case CHIP_RV730:
        case CHIP_RV710:
        case CHIP_RV740:
@@ -1353,6 +1358,8 @@ static void radeon_pm_fini_old(struct radeon_device *rdev)
                device_remove_file(rdev->dev, &dev_attr_power_method);
        }
 
+       radeon_hwmon_fini(rdev);
+
        if (rdev->pm.power_state)
                kfree(rdev->pm.power_state);
 }
@@ -1372,6 +1379,8 @@ static void radeon_pm_fini_dpm(struct radeon_device *rdev)
        }
        radeon_dpm_fini(rdev);
 
+       radeon_hwmon_fini(rdev);
+
        if (rdev->pm.power_state)
                kfree(rdev->pm.power_state);
 }
@@ -1397,12 +1406,14 @@ static void radeon_pm_compute_clocks_old(struct radeon_device *rdev)
 
        rdev->pm.active_crtcs = 0;
        rdev->pm.active_crtc_count = 0;
-       list_for_each_entry(crtc,
-               &ddev->mode_config.crtc_list, head) {
-               radeon_crtc = to_radeon_crtc(crtc);
-               if (radeon_crtc->enabled) {
-                       rdev->pm.active_crtcs |= (1 << radeon_crtc->crtc_id);
-                       rdev->pm.active_crtc_count++;
+       if (rdev->num_crtc && rdev->mode_info.mode_config_initialized) {
+               list_for_each_entry(crtc,
+                                   &ddev->mode_config.crtc_list, head) {
+                       radeon_crtc = to_radeon_crtc(crtc);
+                       if (radeon_crtc->enabled) {
+                               rdev->pm.active_crtcs |= (1 << radeon_crtc->crtc_id);
+                               rdev->pm.active_crtc_count++;
+                       }
                }
        }
 
@@ -1469,12 +1480,14 @@ static void radeon_pm_compute_clocks_dpm(struct radeon_device *rdev)
        /* update active crtc counts */
        rdev->pm.dpm.new_active_crtcs = 0;
        rdev->pm.dpm.new_active_crtc_count = 0;
-       list_for_each_entry(crtc,
-               &ddev->mode_config.crtc_list, head) {
-               radeon_crtc = to_radeon_crtc(crtc);
-               if (crtc->enabled) {
-                       rdev->pm.dpm.new_active_crtcs |= (1 << radeon_crtc->crtc_id);
-                       rdev->pm.dpm.new_active_crtc_count++;
+       if (rdev->num_crtc && rdev->mode_info.mode_config_initialized) {
+               list_for_each_entry(crtc,
+                                   &ddev->mode_config.crtc_list, head) {
+                       radeon_crtc = to_radeon_crtc(crtc);
+                       if (crtc->enabled) {
+                               rdev->pm.dpm.new_active_crtcs |= (1 << radeon_crtc->crtc_id);
+                               rdev->pm.dpm.new_active_crtc_count++;
+                       }
                }
        }
 
index 36c717af6cf90830324a3538ac05dfd4306f3ccb..edb871d7d395cbb4af140120953b9e854d72aae7 100644 (file)
@@ -312,7 +312,7 @@ static void tegra_crtc_disable(struct drm_crtc *crtc)
        struct drm_device *drm = crtc->dev;
        struct drm_plane *plane;
 
-       list_for_each_entry(plane, &drm->mode_config.plane_list, head) {
+       drm_for_each_legacy_plane(plane, &drm->mode_config.plane_list) {
                if (plane->crtc == crtc) {
                        tegra_plane_disable(plane);
                        plane->crtc = NULL;
index 931490b9cfed04b1365a87fb2d83c0526e114fdd..87df0b3674fda203c96baef3ff3030a87424a800 100644 (file)
@@ -1214,14 +1214,36 @@ static int vmw_cmd_dma(struct vmw_private *dev_priv,
                SVGA3dCmdSurfaceDMA dma;
        } *cmd;
        int ret;
+       SVGA3dCmdSurfaceDMASuffix *suffix;
+       uint32_t bo_size;
 
        cmd = container_of(header, struct vmw_dma_cmd, header);
+       suffix = (SVGA3dCmdSurfaceDMASuffix *)((unsigned long) &cmd->dma +
+                                              header->size - sizeof(*suffix));
+
+       /* Make sure device and verifier stays in sync. */
+       if (unlikely(suffix->suffixSize != sizeof(*suffix))) {
+               DRM_ERROR("Invalid DMA suffix size.\n");
+               return -EINVAL;
+       }
+
        ret = vmw_translate_guest_ptr(dev_priv, sw_context,
                                      &cmd->dma.guest.ptr,
                                      &vmw_bo);
        if (unlikely(ret != 0))
                return ret;
 
+       /* Make sure DMA doesn't cross BO boundaries. */
+       bo_size = vmw_bo->base.num_pages * PAGE_SIZE;
+       if (unlikely(cmd->dma.guest.ptr.offset > bo_size)) {
+               DRM_ERROR("Invalid DMA offset.\n");
+               return -EINVAL;
+       }
+
+       bo_size -= cmd->dma.guest.ptr.offset;
+       if (unlikely(suffix->maximumOffset > bo_size))
+               suffix->maximumOffset = bo_size;
+
        ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
                                user_surface_converter, &cmd->dma.host.sid,
                                NULL);
index 6d02e3b063756f6225078df7e00f981478ef5f45..d76f0b70c6e09a0dd4bf52d77df7aa85651377c9 100644 (file)
@@ -365,12 +365,12 @@ static int get_tjmax(struct cpuinfo_x86 *c, u32 id, struct device *dev)
                if (cpu_has_tjmax(c))
                        dev_warn(dev, "Unable to read TjMax from CPU %u\n", id);
        } else {
-               val = (eax >> 16) & 0x7f;
+               val = (eax >> 16) & 0xff;
                /*
                 * If the TjMax is not plausible, an assumption
                 * will be used
                 */
-               if (val >= 85) {
+               if (val) {
                        dev_dbg(dev, "TjMax is %d degrees C\n", val);
                        return val * 1000;
                }
index c104cc32989df7af8ce972b6a5803dfdf5dbaf61..c9cddf5f056bbd6fe014dbd6dfc8dc25eac949d6 100644 (file)
@@ -1,4 +1,4 @@
-/*
+    /*
  * Driver for Linear Technology LTC2945 I2C Power Monitor
  *
  * Copyright (c) 2014 Guenter Roeck
@@ -314,8 +314,8 @@ static ssize_t ltc2945_reset_history(struct device *dev,
                reg = LTC2945_MAX_ADIN_H;
                break;
        default:
-               BUG();
-               break;
+               WARN_ONCE(1, "Bad register: 0x%x\n", reg);
+               return -EINVAL;
        }
        /* Reset maximum */
        ret = regmap_bulk_write(regmap, reg, buf_max, num_regs);
index d867e6bb2be1f7e1b1ebb41788c27f568bd7c579..8242b75d96c87e9b69a079f00e871e70f1867c7f 100644 (file)
 struct vexpress_hwmon_data {
        struct device *hwmon_dev;
        struct vexpress_config_func *func;
+       const char *name;
 };
 
 static ssize_t vexpress_hwmon_name_show(struct device *dev,
                struct device_attribute *dev_attr, char *buffer)
 {
-       const char *compatible = of_get_property(dev->of_node, "compatible",
-                       NULL);
+       struct vexpress_hwmon_data *data = dev_get_drvdata(dev);
 
-       return sprintf(buffer, "%s\n", compatible);
+       return sprintf(buffer, "%s\n", data->name);
 }
 
 static ssize_t vexpress_hwmon_label_show(struct device *dev,
@@ -43,9 +43,6 @@ static ssize_t vexpress_hwmon_label_show(struct device *dev,
 {
        const char *label = of_get_property(dev->of_node, "label", NULL);
 
-       if (!label)
-               return -ENOENT;
-
        return snprintf(buffer, PAGE_SIZE, "%s\n", label);
 }
 
@@ -84,6 +81,20 @@ static ssize_t vexpress_hwmon_u64_show(struct device *dev,
                        to_sensor_dev_attr(dev_attr)->index));
 }
 
+static umode_t vexpress_hwmon_attr_is_visible(struct kobject *kobj,
+               struct attribute *attr, int index)
+{
+       struct device *dev = kobj_to_dev(kobj);
+       struct device_attribute *dev_attr = container_of(attr,
+                               struct device_attribute, attr);
+
+       if (dev_attr->show == vexpress_hwmon_label_show &&
+                       !of_get_property(dev->of_node, "label", NULL))
+               return 0;
+
+       return attr->mode;
+}
+
 static DEVICE_ATTR(name, S_IRUGO, vexpress_hwmon_name_show, NULL);
 
 #define VEXPRESS_HWMON_ATTRS(_name, _label_attr, _input_attr)  \
@@ -94,14 +105,27 @@ struct attribute *vexpress_hwmon_attrs_##_name[] = {               \
        NULL                                                    \
 }
 
+struct vexpress_hwmon_type {
+       const char *name;
+       const struct attribute_group **attr_groups;
+};
+
 #if !defined(CONFIG_REGULATOR_VEXPRESS)
 static DEVICE_ATTR(in1_label, S_IRUGO, vexpress_hwmon_label_show, NULL);
 static SENSOR_DEVICE_ATTR(in1_input, S_IRUGO, vexpress_hwmon_u32_show,
                NULL, 1000);
 static VEXPRESS_HWMON_ATTRS(volt, in1_label, in1_input);
 static struct attribute_group vexpress_hwmon_group_volt = {
+       .is_visible = vexpress_hwmon_attr_is_visible,
        .attrs = vexpress_hwmon_attrs_volt,
 };
+static struct vexpress_hwmon_type vexpress_hwmon_volt = {
+       .name = "vexpress_volt",
+       .attr_groups = (const struct attribute_group *[]) {
+               &vexpress_hwmon_group_volt,
+               NULL,
+       },
+};
 #endif
 
 static DEVICE_ATTR(curr1_label, S_IRUGO, vexpress_hwmon_label_show, NULL);
@@ -109,52 +133,84 @@ static SENSOR_DEVICE_ATTR(curr1_input, S_IRUGO, vexpress_hwmon_u32_show,
                NULL, 1000);
 static VEXPRESS_HWMON_ATTRS(amp, curr1_label, curr1_input);
 static struct attribute_group vexpress_hwmon_group_amp = {
+       .is_visible = vexpress_hwmon_attr_is_visible,
        .attrs = vexpress_hwmon_attrs_amp,
 };
+static struct vexpress_hwmon_type vexpress_hwmon_amp = {
+       .name = "vexpress_amp",
+       .attr_groups = (const struct attribute_group *[]) {
+               &vexpress_hwmon_group_amp,
+               NULL
+       },
+};
 
 static DEVICE_ATTR(temp1_label, S_IRUGO, vexpress_hwmon_label_show, NULL);
 static SENSOR_DEVICE_ATTR(temp1_input, S_IRUGO, vexpress_hwmon_u32_show,
                NULL, 1000);
 static VEXPRESS_HWMON_ATTRS(temp, temp1_label, temp1_input);
 static struct attribute_group vexpress_hwmon_group_temp = {
+       .is_visible = vexpress_hwmon_attr_is_visible,
        .attrs = vexpress_hwmon_attrs_temp,
 };
+static struct vexpress_hwmon_type vexpress_hwmon_temp = {
+       .name = "vexpress_temp",
+       .attr_groups = (const struct attribute_group *[]) {
+               &vexpress_hwmon_group_temp,
+               NULL
+       },
+};
 
 static DEVICE_ATTR(power1_label, S_IRUGO, vexpress_hwmon_label_show, NULL);
 static SENSOR_DEVICE_ATTR(power1_input, S_IRUGO, vexpress_hwmon_u32_show,
                NULL, 1);
 static VEXPRESS_HWMON_ATTRS(power, power1_label, power1_input);
 static struct attribute_group vexpress_hwmon_group_power = {
+       .is_visible = vexpress_hwmon_attr_is_visible,
        .attrs = vexpress_hwmon_attrs_power,
 };
+static struct vexpress_hwmon_type vexpress_hwmon_power = {
+       .name = "vexpress_power",
+       .attr_groups = (const struct attribute_group *[]) {
+               &vexpress_hwmon_group_power,
+               NULL
+       },
+};
 
 static DEVICE_ATTR(energy1_label, S_IRUGO, vexpress_hwmon_label_show, NULL);
 static SENSOR_DEVICE_ATTR(energy1_input, S_IRUGO, vexpress_hwmon_u64_show,
                NULL, 1);
 static VEXPRESS_HWMON_ATTRS(energy, energy1_label, energy1_input);
 static struct attribute_group vexpress_hwmon_group_energy = {
+       .is_visible = vexpress_hwmon_attr_is_visible,
        .attrs = vexpress_hwmon_attrs_energy,
 };
+static struct vexpress_hwmon_type vexpress_hwmon_energy = {
+       .name = "vexpress_energy",
+       .attr_groups = (const struct attribute_group *[]) {
+               &vexpress_hwmon_group_energy,
+               NULL
+       },
+};
 
 static struct of_device_id vexpress_hwmon_of_match[] = {
 #if !defined(CONFIG_REGULATOR_VEXPRESS)
        {
                .compatible = "arm,vexpress-volt",
-               .data = &vexpress_hwmon_group_volt,
+               .data = &vexpress_hwmon_volt,
        },
 #endif
        {
                .compatible = "arm,vexpress-amp",
-               .data = &vexpress_hwmon_group_amp,
+               .data = &vexpress_hwmon_amp,
        }, {
                .compatible = "arm,vexpress-temp",
-               .data = &vexpress_hwmon_group_temp,
+               .data = &vexpress_hwmon_temp,
        }, {
                .compatible = "arm,vexpress-power",
-               .data = &vexpress_hwmon_group_power,
+               .data = &vexpress_hwmon_power,
        }, {
                .compatible = "arm,vexpress-energy",
-               .data = &vexpress_hwmon_group_energy,
+               .data = &vexpress_hwmon_energy,
        },
        {}
 };
@@ -165,6 +221,7 @@ static int vexpress_hwmon_probe(struct platform_device *pdev)
        int err;
        const struct of_device_id *match;
        struct vexpress_hwmon_data *data;
+       const struct vexpress_hwmon_type *type;
 
        data = devm_kzalloc(&pdev->dev, sizeof(*data), GFP_KERNEL);
        if (!data)
@@ -174,12 +231,14 @@ static int vexpress_hwmon_probe(struct platform_device *pdev)
        match = of_match_device(vexpress_hwmon_of_match, &pdev->dev);
        if (!match)
                return -ENODEV;
+       type = match->data;
+       data->name = type->name;
 
        data->func = vexpress_config_func_get_by_dev(&pdev->dev);
        if (!data->func)
                return -ENODEV;
 
-       err = sysfs_create_group(&pdev->dev.kobj, match->data);
+       err = sysfs_create_groups(&pdev->dev.kobj, type->attr_groups);
        if (err)
                goto error;
 
index a43220c2e3d943a3437e29df1352aca30593e4cf..4d140bbbe1006c172cecd2fd5313778e8eb5db17 100644 (file)
@@ -750,9 +750,10 @@ void intel_idle_state_table_update(void)
                        if (package_num + 1 > num_sockets) {
                                num_sockets = package_num + 1;
 
-                               if (num_sockets > 4)
+                               if (num_sockets > 4) {
                                        cpuidle_state_table = ivt_cstates_8s;
                                        return;
+                               }
                        }
                }
 
index d86196cfe4b47091add5d756da6d3dbf7fded9eb..24c28e3f93a3c960b7fd5c51e9f5348e003ababf 100644 (file)
@@ -106,7 +106,7 @@ config AT91_ADC
          Say yes here to build support for Atmel AT91 ADC.
 
 config EXYNOS_ADC
-       bool "Exynos ADC driver support"
+       tristate "Exynos ADC driver support"
        depends on OF
        help
          Core support for the ADC block found in the Samsung EXYNOS series
@@ -114,7 +114,7 @@ config EXYNOS_ADC
          this resource.
 
 config LP8788_ADC
-       bool "LP8788 ADC driver"
+       tristate "LP8788 ADC driver"
        depends on MFD_LP8788
        help
          Say yes here to build support for TI LP8788 ADC.
index 5b1aa027c034b09c1569047231716ea66f75f919..89777ed9abd858773b128c3a4d9fd6b34bdd9584 100644 (file)
@@ -765,14 +765,17 @@ static int at91_adc_probe_pdata(struct at91_adc_state *st,
        if (!pdata)
                return -EINVAL;
 
+       st->caps = (struct at91_adc_caps *)
+                       platform_get_device_id(pdev)->driver_data;
+
        st->use_external = pdata->use_external_triggers;
        st->vref_mv = pdata->vref;
        st->channels_mask = pdata->channels_used;
-       st->num_channels = pdata->num_channels;
+       st->num_channels = st->caps->num_channels;
        st->startup_time = pdata->startup_time;
        st->trigger_number = pdata->trigger_number;
        st->trigger_list = pdata->trigger_list;
-       st->registers = pdata->registers;
+       st->registers = &st->caps->registers;
 
        return 0;
 }
@@ -1004,8 +1007,11 @@ static int at91_adc_probe(struct platform_device *pdev)
         * the best converted final value between two channels selection
         * The formula thus is : Sample and Hold Time = (shtim + 1) / ADCClock
         */
-       shtim = round_up((st->sample_hold_time * adc_clk_khz /
-                         1000) - 1, 1);
+       if (st->sample_hold_time > 0)
+               shtim = round_up((st->sample_hold_time * adc_clk_khz / 1000)
+                                - 1, 1);
+       else
+               shtim = 0;
 
        reg = AT91_ADC_PRESCAL_(prsc) & st->registers->mr_prescal_mask;
        reg |= AT91_ADC_STARTUP_(ticks) & st->registers->mr_startup_mask;
@@ -1101,7 +1107,6 @@ static int at91_adc_remove(struct platform_device *pdev)
        return 0;
 }
 
-#ifdef CONFIG_OF
 static struct at91_adc_caps at91sam9260_caps = {
        .calc_startup_ticks = calc_startup_ticks_9260,
        .num_channels = 4,
@@ -1154,11 +1159,27 @@ static const struct of_device_id at91_adc_dt_ids[] = {
        {},
 };
 MODULE_DEVICE_TABLE(of, at91_adc_dt_ids);
-#endif
+
+static const struct platform_device_id at91_adc_ids[] = {
+       {
+               .name = "at91sam9260-adc",
+               .driver_data = (unsigned long)&at91sam9260_caps,
+       }, {
+               .name = "at91sam9g45-adc",
+               .driver_data = (unsigned long)&at91sam9g45_caps,
+       }, {
+               .name = "at91sam9x5-adc",
+               .driver_data = (unsigned long)&at91sam9x5_caps,
+       }, {
+               /* terminator */
+       }
+};
+MODULE_DEVICE_TABLE(platform, at91_adc_ids);
 
 static struct platform_driver at91_adc_driver = {
        .probe = at91_adc_probe,
        .remove = at91_adc_remove,
+       .id_table = at91_adc_ids,
        .driver = {
                   .name = DRIVER_NAME,
                   .of_match_table = of_match_ptr(at91_adc_dt_ids),
index d25b262193a7d4bccad40a42b79685cf4956e6fc..affa93f517893b1c6db2ece1c1e83afeea1828aa 100644 (file)
@@ -344,7 +344,7 @@ static int exynos_adc_probe(struct platform_device *pdev)
 
        exynos_adc_hw_init(info);
 
-       ret = of_platform_populate(np, exynos_adc_match, NULL, &pdev->dev);
+       ret = of_platform_populate(np, exynos_adc_match, NULL, &indio_dev->dev);
        if (ret < 0) {
                dev_err(&pdev->dev, "failed adding child nodes\n");
                goto err_of_populate;
@@ -353,7 +353,7 @@ static int exynos_adc_probe(struct platform_device *pdev)
        return 0;
 
 err_of_populate:
-       device_for_each_child(&pdev->dev, NULL,
+       device_for_each_child(&indio_dev->dev, NULL,
                                exynos_adc_remove_devices);
        regulator_disable(info->vdd);
        clk_disable_unprepare(info->clk);
@@ -369,7 +369,7 @@ static int exynos_adc_remove(struct platform_device *pdev)
        struct iio_dev *indio_dev = platform_get_drvdata(pdev);
        struct exynos_adc *info = iio_priv(indio_dev);
 
-       device_for_each_child(&pdev->dev, NULL,
+       device_for_each_child(&indio_dev->dev, NULL,
                                exynos_adc_remove_devices);
        regulator_disable(info->vdd);
        clk_disable_unprepare(info->clk);
index cb9f96b446a55cd138f129db3443d44821b32781..d8ad606c7cd0c7e054d3764afe1b9edb069aaed7 100644 (file)
@@ -660,6 +660,7 @@ static int inv_mpu_probe(struct i2c_client *client,
 {
        struct inv_mpu6050_state *st;
        struct iio_dev *indio_dev;
+       struct inv_mpu6050_platform_data *pdata;
        int result;
 
        if (!i2c_check_functionality(client->adapter,
@@ -672,8 +673,10 @@ static int inv_mpu_probe(struct i2c_client *client,
 
        st = iio_priv(indio_dev);
        st->client = client;
-       st->plat_data = *(struct inv_mpu6050_platform_data
-                               *)dev_get_platdata(&client->dev);
+       pdata = (struct inv_mpu6050_platform_data
+                       *)dev_get_platdata(&client->dev);
+       if (pdata)
+               st->plat_data = *pdata;
        /* power is turned on inside check chip type*/
        result = inv_check_and_setup_chip(st, id);
        if (result)
index e108f2a9d827fca1e37932f18b5232ca4e9b65ea..e472cff6eeae38168331bf8b13a6583e9756842e 100644 (file)
@@ -165,7 +165,8 @@ static ssize_t iio_scan_el_show(struct device *dev,
        int ret;
        struct iio_dev *indio_dev = dev_to_iio_dev(dev);
 
-       ret = test_bit(to_iio_dev_attr(attr)->address,
+       /* Ensure ret is 0 or 1. */
+       ret = !!test_bit(to_iio_dev_attr(attr)->address,
                       indio_dev->buffer->scan_mask);
 
        return sprintf(buf, "%d\n", ret);
@@ -862,7 +863,8 @@ int iio_scan_mask_query(struct iio_dev *indio_dev,
        if (!buffer->scan_mask)
                return 0;
 
-       return test_bit(bit, buffer->scan_mask);
+       /* Ensure return value is 0 or 1. */
+       return !!test_bit(bit, buffer->scan_mask);
 };
 EXPORT_SYMBOL_GPL(iio_scan_mask_query);
 
index 47a6dbac2d0ca8b23dcea31158b43d3c3df3d2f0..d976e6ce60dbb22a57ee293b7baed954f648d2d3 100644 (file)
@@ -221,6 +221,7 @@ static int cm32181_read_raw(struct iio_dev *indio_dev,
                *val = cm32181->calibscale;
                return IIO_VAL_INT;
        case IIO_CHAN_INFO_INT_TIME:
+               *val = 0;
                ret = cm32181_read_als_it(cm32181, val2);
                return ret;
        }
index a45e07492db318a22171546291d2b590e0d0668f..39fc67e82138470a1f8523d7de2902f072603a6f 100644 (file)
@@ -652,7 +652,19 @@ static int cm36651_probe(struct i2c_client *client,
        cm36651->client = client;
        cm36651->ps_client = i2c_new_dummy(client->adapter,
                                                     CM36651_I2C_ADDR_PS);
+       if (!cm36651->ps_client) {
+               dev_err(&client->dev, "%s: new i2c device failed\n", __func__);
+               ret = -ENODEV;
+               goto error_disable_reg;
+       }
+
        cm36651->ara_client = i2c_new_dummy(client->adapter, CM36651_ARA);
+       if (!cm36651->ara_client) {
+               dev_err(&client->dev, "%s: new i2c device failed\n", __func__);
+               ret = -ENODEV;
+               goto error_i2c_unregister_ps;
+       }
+
        mutex_init(&cm36651->lock);
        indio_dev->dev.parent = &client->dev;
        indio_dev->channels = cm36651_channels;
@@ -664,7 +676,7 @@ static int cm36651_probe(struct i2c_client *client,
        ret = cm36651_setup_reg(cm36651);
        if (ret) {
                dev_err(&client->dev, "%s: register setup failed\n", __func__);
-               goto error_disable_reg;
+               goto error_i2c_unregister_ara;
        }
 
        ret = request_threaded_irq(client->irq, NULL, cm36651_irq_handler,
@@ -672,7 +684,7 @@ static int cm36651_probe(struct i2c_client *client,
                                                        "cm36651", indio_dev);
        if (ret) {
                dev_err(&client->dev, "%s: request irq failed\n", __func__);
-               goto error_disable_reg;
+               goto error_i2c_unregister_ara;
        }
 
        ret = iio_device_register(indio_dev);
@@ -685,6 +697,10 @@ static int cm36651_probe(struct i2c_client *client,
 
 error_free_irq:
        free_irq(client->irq, indio_dev);
+error_i2c_unregister_ara:
+       i2c_unregister_device(cm36651->ara_client);
+error_i2c_unregister_ps:
+       i2c_unregister_device(cm36651->ps_client);
 error_disable_reg:
        regulator_disable(cm36651->vled_reg);
        return ret;
@@ -698,6 +714,8 @@ static int cm36651_remove(struct i2c_client *client)
        iio_device_unregister(indio_dev);
        regulator_disable(cm36651->vled_reg);
        free_irq(client->irq, indio_dev);
+       i2c_unregister_device(cm36651->ps_client);
+       i2c_unregister_device(cm36651->ara_client);
 
        return 0;
 }
index d4e8983fba537d71b8da25b5b0768f088678722d..23f38cf2c5cd030c2ba9e3aebd0d199c4925e930 100644 (file)
@@ -1,10 +1,10 @@
 config INFINIBAND_CXGB4
-       tristate "Chelsio T4 RDMA Driver"
+       tristate "Chelsio T4/T5 RDMA Driver"
        depends on CHELSIO_T4 && INET && (IPV6 || IPV6=n)
        select GENERIC_ALLOCATOR
        ---help---
-         This is an iWARP/RDMA driver for the Chelsio T4 1GbE and
-         10GbE adapters.
+         This is an iWARP/RDMA driver for the Chelsio T4 and T5
+         1GbE, 10GbE adapters and T5 40GbE adapter.
 
          For general information about Chelsio and our products, visit
          our website at <http://www.chelsio.com>.
index 185452abf32cf336049e20802759a4352996392b..1f863a96a480fd1ab087989acea029a781c7c23e 100644 (file)
@@ -587,6 +587,10 @@ static int send_connect(struct c4iw_ep *ep)
                opt2 |= SACK_EN(1);
        if (wscale && enable_tcp_window_scaling)
                opt2 |= WND_SCALE_EN(1);
+       if (is_t5(ep->com.dev->rdev.lldi.adapter_type)) {
+               opt2 |= T5_OPT_2_VALID;
+               opt2 |= V_CONG_CNTRL(CONG_ALG_TAHOE);
+       }
        t4_set_arp_err_handler(skb, NULL, act_open_req_arp_failure);
 
        if (is_t4(ep->com.dev->rdev.lldi.adapter_type)) {
@@ -996,7 +1000,7 @@ static void close_complete_upcall(struct c4iw_ep *ep, int status)
 static int abort_connection(struct c4iw_ep *ep, struct sk_buff *skb, gfp_t gfp)
 {
        PDBG("%s ep %p tid %u\n", __func__, ep, ep->hwtid);
-       state_set(&ep->com, ABORTING);
+       __state_set(&ep->com, ABORTING);
        set_bit(ABORT_CONN, &ep->com.history);
        return send_abort(ep, skb, gfp);
 }
@@ -1154,7 +1158,7 @@ static int update_rx_credits(struct c4iw_ep *ep, u32 credits)
        return credits;
 }
 
-static void process_mpa_reply(struct c4iw_ep *ep, struct sk_buff *skb)
+static int process_mpa_reply(struct c4iw_ep *ep, struct sk_buff *skb)
 {
        struct mpa_message *mpa;
        struct mpa_v2_conn_params *mpa_v2_params;
@@ -1164,6 +1168,7 @@ static void process_mpa_reply(struct c4iw_ep *ep, struct sk_buff *skb)
        struct c4iw_qp_attributes attrs;
        enum c4iw_qp_attr_mask mask;
        int err;
+       int disconnect = 0;
 
        PDBG("%s ep %p tid %u\n", __func__, ep, ep->hwtid);
 
@@ -1173,7 +1178,7 @@ static void process_mpa_reply(struct c4iw_ep *ep, struct sk_buff *skb)
         * will abort the connection.
         */
        if (stop_ep_timer(ep))
-               return;
+               return 0;
 
        /*
         * If we get more than the supported amount of private data
@@ -1195,7 +1200,7 @@ static void process_mpa_reply(struct c4iw_ep *ep, struct sk_buff *skb)
         * if we don't even have the mpa message, then bail.
         */
        if (ep->mpa_pkt_len < sizeof(*mpa))
-               return;
+               return 0;
        mpa = (struct mpa_message *) ep->mpa_pkt;
 
        /* Validate MPA header. */
@@ -1235,7 +1240,7 @@ static void process_mpa_reply(struct c4iw_ep *ep, struct sk_buff *skb)
         * We'll continue process when more data arrives.
         */
        if (ep->mpa_pkt_len < (sizeof(*mpa) + plen))
-               return;
+               return 0;
 
        if (mpa->flags & MPA_REJECT) {
                err = -ECONNREFUSED;
@@ -1337,9 +1342,11 @@ static void process_mpa_reply(struct c4iw_ep *ep, struct sk_buff *skb)
                attrs.layer_etype = LAYER_MPA | DDP_LLP;
                attrs.ecode = MPA_NOMATCH_RTR;
                attrs.next_state = C4IW_QP_STATE_TERMINATE;
+               attrs.send_term = 1;
                err = c4iw_modify_qp(ep->com.qp->rhp, ep->com.qp,
-                               C4IW_QP_ATTR_NEXT_STATE, &attrs, 0);
+                               C4IW_QP_ATTR_NEXT_STATE, &attrs, 1);
                err = -ENOMEM;
+               disconnect = 1;
                goto out;
        }
 
@@ -1355,9 +1362,11 @@ static void process_mpa_reply(struct c4iw_ep *ep, struct sk_buff *skb)
                attrs.layer_etype = LAYER_MPA | DDP_LLP;
                attrs.ecode = MPA_INSUFF_IRD;
                attrs.next_state = C4IW_QP_STATE_TERMINATE;
+               attrs.send_term = 1;
                err = c4iw_modify_qp(ep->com.qp->rhp, ep->com.qp,
-                               C4IW_QP_ATTR_NEXT_STATE, &attrs, 0);
+                               C4IW_QP_ATTR_NEXT_STATE, &attrs, 1);
                err = -ENOMEM;
+               disconnect = 1;
                goto out;
        }
        goto out;
@@ -1366,7 +1375,7 @@ err:
        send_abort(ep, skb, GFP_KERNEL);
 out:
        connect_reply_upcall(ep, err);
-       return;
+       return disconnect;
 }
 
 static void process_mpa_request(struct c4iw_ep *ep, struct sk_buff *skb)
@@ -1524,6 +1533,7 @@ static int rx_data(struct c4iw_dev *dev, struct sk_buff *skb)
        unsigned int tid = GET_TID(hdr);
        struct tid_info *t = dev->rdev.lldi.tids;
        __u8 status = hdr->status;
+       int disconnect = 0;
 
        ep = lookup_tid(t, tid);
        if (!ep)
@@ -1539,7 +1549,7 @@ static int rx_data(struct c4iw_dev *dev, struct sk_buff *skb)
        switch (ep->com.state) {
        case MPA_REQ_SENT:
                ep->rcv_seq += dlen;
-               process_mpa_reply(ep, skb);
+               disconnect = process_mpa_reply(ep, skb);
                break;
        case MPA_REQ_WAIT:
                ep->rcv_seq += dlen;
@@ -1555,13 +1565,16 @@ static int rx_data(struct c4iw_dev *dev, struct sk_buff *skb)
                               ep->com.state, ep->hwtid, status);
                attrs.next_state = C4IW_QP_STATE_TERMINATE;
                c4iw_modify_qp(ep->com.qp->rhp, ep->com.qp,
-                              C4IW_QP_ATTR_NEXT_STATE, &attrs, 0);
+                              C4IW_QP_ATTR_NEXT_STATE, &attrs, 1);
+               disconnect = 1;
                break;
        }
        default:
                break;
        }
        mutex_unlock(&ep->com.mutex);
+       if (disconnect)
+               c4iw_ep_disconnect(ep, 0, GFP_KERNEL);
        return 0;
 }
 
@@ -2009,6 +2022,10 @@ static void accept_cr(struct c4iw_ep *ep, struct sk_buff *skb,
                if (tcph->ece && tcph->cwr)
                        opt2 |= CCTRL_ECN(1);
        }
+       if (is_t5(ep->com.dev->rdev.lldi.adapter_type)) {
+               opt2 |= T5_OPT_2_VALID;
+               opt2 |= V_CONG_CNTRL(CONG_ALG_TAHOE);
+       }
 
        rpl = cplhdr(skb);
        INIT_TP_WR(rpl, ep->hwtid);
@@ -3482,9 +3499,9 @@ static void process_timeout(struct c4iw_ep *ep)
                        __func__, ep, ep->hwtid, ep->com.state);
                abort = 0;
        }
-       mutex_unlock(&ep->com.mutex);
        if (abort)
                abort_connection(ep, NULL, GFP_KERNEL);
+       mutex_unlock(&ep->com.mutex);
        c4iw_put_ep(&ep->com);
 }
 
index 7b8c5806a09d84d912d274d4a5da814109e921b8..7474b490760a413f9f13d9e04ead79319a6fd55e 100644 (file)
@@ -435,6 +435,7 @@ struct c4iw_qp_attributes {
        u8 ecode;
        u16 sq_db_inc;
        u16 rq_db_inc;
+       u8 send_term;
 };
 
 struct c4iw_qp {
index 7b5114cb486f64f118beb7f2415ad415d75f40ae..086f62f5dc9e2ba5978e81f02e8e392c8e201774 100644 (file)
@@ -1388,11 +1388,12 @@ int c4iw_modify_qp(struct c4iw_dev *rhp, struct c4iw_qp *qhp,
                        qhp->attr.layer_etype = attrs->layer_etype;
                        qhp->attr.ecode = attrs->ecode;
                        ep = qhp->ep;
-                       disconnect = 1;
-                       c4iw_get_ep(&qhp->ep->com);
-                       if (!internal)
+                       if (!internal) {
+                               c4iw_get_ep(&qhp->ep->com);
                                terminate = 1;
-                       else {
+                               disconnect = 1;
+                       } else {
+                               terminate = qhp->attr.send_term;
                                ret = rdma_fini(rhp, qhp, ep);
                                if (ret)
                                        goto err;
@@ -1776,11 +1777,15 @@ int c4iw_ib_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr,
        /*
         * Use SQ_PSN and RQ_PSN to pass in IDX_INC values for
         * ringing the queue db when we're in DB_FULL mode.
+        * Only allow this on T4 devices.
         */
        attrs.sq_db_inc = attr->sq_psn;
        attrs.rq_db_inc = attr->rq_psn;
        mask |= (attr_mask & IB_QP_SQ_PSN) ? C4IW_QP_ATTR_SQ_DB : 0;
        mask |= (attr_mask & IB_QP_RQ_PSN) ? C4IW_QP_ATTR_RQ_DB : 0;
+       if (is_t5(to_c4iw_qp(ibqp)->rhp->rdev.lldi.adapter_type) &&
+           (mask & (C4IW_QP_ATTR_SQ_DB|C4IW_QP_ATTR_RQ_DB)))
+               return -EINVAL;
 
        return c4iw_modify_qp(rhp, qhp, mask, &attrs, 0);
 }
index dc193c292671ca49e889bc6bedb10d7c1a5fe8ce..6121ca08fe588bff67aab81fe7df06119287292b 100644 (file)
@@ -836,4 +836,18 @@ struct ulptx_idata {
 #define V_RX_DACK_CHANGE(x) ((x) << S_RX_DACK_CHANGE)
 #define F_RX_DACK_CHANGE    V_RX_DACK_CHANGE(1U)
 
+enum {                     /* TCP congestion control algorithms */
+       CONG_ALG_RENO,
+       CONG_ALG_TAHOE,
+       CONG_ALG_NEWRENO,
+       CONG_ALG_HIGHSPEED
+};
+
+#define S_CONG_CNTRL    14
+#define M_CONG_CNTRL    0x3
+#define V_CONG_CNTRL(x) ((x) << S_CONG_CNTRL)
+#define G_CONG_CNTRL(x) (((x) >> S_CONG_CNTRL) & M_CONG_CNTRL)
+
+#define T5_OPT_2_VALID       (1 << 31)
+
 #endif /* _T4FW_RI_API_H_ */
index c4b3940845e60570fcbc5e5d0fa5ef807551d45e..078cadd6c797afeb0e22267fb76e5362a4b97326 100644 (file)
@@ -105,5 +105,5 @@ static const struct ethtool_ops ipoib_ethtool_ops = {
 
 void ipoib_set_ethtool_ops(struct net_device *dev)
 {
-       SET_ETHTOOL_OPS(dev, &ipoib_ethtool_ops);
+       dev->ethtool_ops = &ipoib_ethtool_ops;
 }
index 4b11ede34950e57587daa09f8c0af0ed63602ea4..4765799fef746b0e7900b4f8aaf327f8af717576 100644 (file)
@@ -109,7 +109,6 @@ static int da9055_onkey_probe(struct platform_device *pdev)
 
        INIT_DELAYED_WORK(&onkey->work, da9055_onkey_work);
 
-       irq = regmap_irq_get_virq(da9055->irq_data, irq);
        err = request_threaded_irq(irq, NULL, da9055_onkey_irq,
                                   IRQF_TRIGGER_HIGH | IRQF_ONESHOT,
                                   "ONKEY", onkey);
index 08ead2aaede5d8ba8a324ff8157beb74d7984b2d..20c80f543d5e56fbe59f15403f2d64d9cb791cff 100644 (file)
@@ -169,6 +169,7 @@ static int soc_button_pnp_probe(struct pnp_dev *pdev,
                                soc_button_remove(pdev);
                                return error;
                        }
+                       continue;
                }
 
                priv->children[i] = pd;
index ef1cf52f8bb99212a96e9fe415ebf81bbb1e21bf..088d3541c7d3d4485380dd859e9777c6e39c94d2 100644 (file)
@@ -1353,6 +1353,7 @@ static int elantech_set_properties(struct elantech_data *etd)
                case 6:
                case 7:
                case 8:
+               case 9:
                        etd->hw_version = 4;
                        break;
                default:
index d8d49d10f9bb60d477124bba603fb9f61be1efdb..ef9f4913450d12a06262dd906b8ebe93b0401dbe 100644 (file)
@@ -117,6 +117,44 @@ void synaptics_reset(struct psmouse *psmouse)
 }
 
 #ifdef CONFIG_MOUSE_PS2_SYNAPTICS
+/* This list has been kindly provided by Synaptics. */
+static const char * const topbuttonpad_pnp_ids[] = {
+       "LEN0017",
+       "LEN0018",
+       "LEN0019",
+       "LEN0023",
+       "LEN002A",
+       "LEN002B",
+       "LEN002C",
+       "LEN002D",
+       "LEN002E",
+       "LEN0033", /* Helix */
+       "LEN0034", /* T431s, T540, X1 Carbon 2nd */
+       "LEN0035", /* X240 */
+       "LEN0036", /* T440 */
+       "LEN0037",
+       "LEN0038",
+       "LEN0041",
+       "LEN0042", /* Yoga */
+       "LEN0045",
+       "LEN0046",
+       "LEN0047",
+       "LEN0048",
+       "LEN0049",
+       "LEN2000",
+       "LEN2001",
+       "LEN2002",
+       "LEN2003",
+       "LEN2004", /* L440 */
+       "LEN2005",
+       "LEN2006",
+       "LEN2007",
+       "LEN2008",
+       "LEN2009",
+       "LEN200A",
+       "LEN200B",
+       NULL
+};
 
 /*****************************************************************************
  *     Synaptics communications functions
@@ -1255,8 +1293,10 @@ static void set_abs_position_params(struct input_dev *dev,
        input_abs_set_res(dev, y_code, priv->y_res);
 }
 
-static void set_input_params(struct input_dev *dev, struct synaptics_data *priv)
+static void set_input_params(struct psmouse *psmouse,
+                            struct synaptics_data *priv)
 {
+       struct input_dev *dev = psmouse->dev;
        int i;
 
        /* Things that apply to both modes */
@@ -1325,6 +1365,17 @@ static void set_input_params(struct input_dev *dev, struct synaptics_data *priv)
 
        if (SYN_CAP_CLICKPAD(priv->ext_cap_0c)) {
                __set_bit(INPUT_PROP_BUTTONPAD, dev->propbit);
+               /* See if this buttonpad has a top button area */
+               if (!strncmp(psmouse->ps2dev.serio->firmware_id, "PNP:", 4)) {
+                       for (i = 0; topbuttonpad_pnp_ids[i]; i++) {
+                               if (strstr(psmouse->ps2dev.serio->firmware_id,
+                                          topbuttonpad_pnp_ids[i])) {
+                                       __set_bit(INPUT_PROP_TOPBUTTONPAD,
+                                                 dev->propbit);
+                                       break;
+                               }
+                       }
+               }
                /* Clickpads report only left button */
                __clear_bit(BTN_RIGHT, dev->keybit);
                __clear_bit(BTN_MIDDLE, dev->keybit);
@@ -1514,6 +1565,14 @@ static const struct dmi_system_id min_max_dmi_table[] __initconst = {
                },
                .driver_data = (int []){1232, 5710, 1156, 4696},
        },
+       {
+               /* Lenovo ThinkPad T431s */
+               .matches = {
+                       DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"),
+                       DMI_MATCH(DMI_PRODUCT_VERSION, "ThinkPad T431"),
+               },
+               .driver_data = (int []){1024, 5112, 2024, 4832},
+       },
        {
                /* Lenovo ThinkPad T440s */
                .matches = {
@@ -1522,6 +1581,14 @@ static const struct dmi_system_id min_max_dmi_table[] __initconst = {
                },
                .driver_data = (int []){1024, 5112, 2024, 4832},
        },
+       {
+               /* Lenovo ThinkPad L440 */
+               .matches = {
+                       DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"),
+                       DMI_MATCH(DMI_PRODUCT_VERSION, "ThinkPad L440"),
+               },
+               .driver_data = (int []){1024, 5112, 2024, 4832},
+       },
        {
                /* Lenovo ThinkPad T540p */
                .matches = {
@@ -1530,6 +1597,32 @@ static const struct dmi_system_id min_max_dmi_table[] __initconst = {
                },
                .driver_data = (int []){1024, 5056, 2058, 4832},
        },
+       {
+               /* Lenovo ThinkPad L540 */
+               .matches = {
+                       DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"),
+                       DMI_MATCH(DMI_PRODUCT_VERSION, "ThinkPad L540"),
+               },
+               .driver_data = (int []){1024, 5112, 2024, 4832},
+       },
+       {
+               /* Lenovo Yoga S1 */
+               .matches = {
+                       DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"),
+                       DMI_EXACT_MATCH(DMI_PRODUCT_VERSION,
+                                       "ThinkPad S1 Yoga"),
+               },
+               .driver_data = (int []){1232, 5710, 1156, 4696},
+       },
+       {
+               /* Lenovo ThinkPad X1 Carbon Haswell (3rd generation) */
+               .matches = {
+                       DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"),
+                       DMI_MATCH(DMI_PRODUCT_VERSION,
+                                       "ThinkPad X1 Carbon 2nd"),
+               },
+               .driver_data = (int []){1024, 5112, 2024, 4832},
+       },
 #endif
        { }
 };
@@ -1593,7 +1686,7 @@ static int __synaptics_init(struct psmouse *psmouse, bool absolute_mode)
                     priv->capabilities, priv->ext_cap, priv->ext_cap_0c,
                     priv->board_id, priv->firmware_id);
 
-       set_input_params(psmouse->dev, priv);
+       set_input_params(psmouse, priv);
 
        /*
         * Encode touchpad model so that it can be used to set
index 0ec9abbe31fec3af5248808fce517fc863ff75b2..381b20d4c5618d8fc7f5c3e616479d4663d9ae03 100644 (file)
@@ -702,6 +702,17 @@ static int i8042_pnp_aux_irq;
 static char i8042_pnp_kbd_name[32];
 static char i8042_pnp_aux_name[32];
 
+static void i8042_pnp_id_to_string(struct pnp_id *id, char *dst, int dst_size)
+{
+       strlcpy(dst, "PNP:", dst_size);
+
+       while (id) {
+               strlcat(dst, " ", dst_size);
+               strlcat(dst, id->id, dst_size);
+               id = id->next;
+       }
+}
+
 static int i8042_pnp_kbd_probe(struct pnp_dev *dev, const struct pnp_device_id *did)
 {
        if (pnp_port_valid(dev, 0) && pnp_port_len(dev, 0) == 1)
@@ -718,6 +729,8 @@ static int i8042_pnp_kbd_probe(struct pnp_dev *dev, const struct pnp_device_id *
                strlcat(i8042_pnp_kbd_name, ":", sizeof(i8042_pnp_kbd_name));
                strlcat(i8042_pnp_kbd_name, pnp_dev_name(dev), sizeof(i8042_pnp_kbd_name));
        }
+       i8042_pnp_id_to_string(dev->id, i8042_kbd_firmware_id,
+                              sizeof(i8042_kbd_firmware_id));
 
        /* Keyboard ports are always supposed to be wakeup-enabled */
        device_set_wakeup_enable(&dev->dev, true);
@@ -742,6 +755,8 @@ static int i8042_pnp_aux_probe(struct pnp_dev *dev, const struct pnp_device_id *
                strlcat(i8042_pnp_aux_name, ":", sizeof(i8042_pnp_aux_name));
                strlcat(i8042_pnp_aux_name, pnp_dev_name(dev), sizeof(i8042_pnp_aux_name));
        }
+       i8042_pnp_id_to_string(dev->id, i8042_aux_firmware_id,
+                              sizeof(i8042_aux_firmware_id));
 
        i8042_pnp_aux_devices++;
        return 0;
index 020053fa5aaa38fce82b5fc2d6ced8546661c9df..3807c3e971cca79e6ff42b745409ac418487bb0c 100644 (file)
@@ -87,6 +87,8 @@ MODULE_PARM_DESC(debug, "Turn i8042 debugging mode on and off");
 #endif
 
 static bool i8042_bypass_aux_irq_test;
+static char i8042_kbd_firmware_id[128];
+static char i8042_aux_firmware_id[128];
 
 #include "i8042.h"
 
@@ -1218,6 +1220,8 @@ static int __init i8042_create_kbd_port(void)
        serio->dev.parent       = &i8042_platform_device->dev;
        strlcpy(serio->name, "i8042 KBD port", sizeof(serio->name));
        strlcpy(serio->phys, I8042_KBD_PHYS_DESC, sizeof(serio->phys));
+       strlcpy(serio->firmware_id, i8042_kbd_firmware_id,
+               sizeof(serio->firmware_id));
 
        port->serio = serio;
        port->irq = I8042_KBD_IRQ;
@@ -1244,6 +1248,8 @@ static int __init i8042_create_aux_port(int idx)
        if (idx < 0) {
                strlcpy(serio->name, "i8042 AUX port", sizeof(serio->name));
                strlcpy(serio->phys, I8042_AUX_PHYS_DESC, sizeof(serio->phys));
+               strlcpy(serio->firmware_id, i8042_aux_firmware_id,
+                       sizeof(serio->firmware_id));
                serio->close = i8042_port_close;
        } else {
                snprintf(serio->name, sizeof(serio->name), "i8042 AUX%d port", idx);
index 8f4c4ab04bc2d8c61d5fda2a65696806e61ad6f2..b29134de983b85ff2df85ba171d8e4a7f8cdac5b 100644 (file)
@@ -451,6 +451,13 @@ static ssize_t serio_set_bind_mode(struct device *dev, struct device_attribute *
        return retval;
 }
 
+static ssize_t firmware_id_show(struct device *dev, struct device_attribute *attr, char *buf)
+{
+       struct serio *serio = to_serio_port(dev);
+
+       return sprintf(buf, "%s\n", serio->firmware_id);
+}
+
 static DEVICE_ATTR_RO(type);
 static DEVICE_ATTR_RO(proto);
 static DEVICE_ATTR_RO(id);
@@ -473,12 +480,14 @@ static DEVICE_ATTR_RO(modalias);
 static DEVICE_ATTR_WO(drvctl);
 static DEVICE_ATTR(description, S_IRUGO, serio_show_description, NULL);
 static DEVICE_ATTR(bind_mode, S_IWUSR | S_IRUGO, serio_show_bind_mode, serio_set_bind_mode);
+static DEVICE_ATTR_RO(firmware_id);
 
 static struct attribute *serio_device_attrs[] = {
        &dev_attr_modalias.attr,
        &dev_attr_description.attr,
        &dev_attr_drvctl.attr,
        &dev_attr_bind_mode.attr,
+       &dev_attr_firmware_id.attr,
        NULL
 };
 
@@ -921,9 +930,14 @@ static int serio_uevent(struct device *dev, struct kobj_uevent_env *env)
        SERIO_ADD_UEVENT_VAR("SERIO_PROTO=%02x", serio->id.proto);
        SERIO_ADD_UEVENT_VAR("SERIO_ID=%02x", serio->id.id);
        SERIO_ADD_UEVENT_VAR("SERIO_EXTRA=%02x", serio->id.extra);
+
        SERIO_ADD_UEVENT_VAR("MODALIAS=serio:ty%02Xpr%02Xid%02Xex%02X",
                                serio->id.type, serio->id.proto, serio->id.id, serio->id.extra);
 
+       if (serio->firmware_id[0])
+               SERIO_ADD_UEVENT_VAR("SERIO_FIRMWARE_ID=%s",
+                                    serio->firmware_id);
+
        return 0;
 }
 #undef SERIO_ADD_UEVENT_VAR
index b16ebef5b9111c7a999845300e1c33070f2a31b4..611fc3905d00d9fafa7cf1088b28f6c22fccca28 100644 (file)
 #define HID_USAGE_PAGE_DIGITIZER       0x0d
 #define HID_USAGE_PAGE_DESKTOP         0x01
 #define HID_USAGE                      0x09
-#define HID_USAGE_X                    0x30
-#define HID_USAGE_Y                    0x31
-#define HID_USAGE_X_TILT               0x3d
-#define HID_USAGE_Y_TILT               0x3e
-#define HID_USAGE_FINGER               0x22
-#define HID_USAGE_STYLUS               0x20
-#define HID_USAGE_CONTACTMAX           0x55
+#define HID_USAGE_X                    ((HID_USAGE_PAGE_DESKTOP << 16) | 0x30)
+#define HID_USAGE_Y                    ((HID_USAGE_PAGE_DESKTOP << 16) | 0x31)
+#define HID_USAGE_PRESSURE             ((HID_USAGE_PAGE_DIGITIZER << 16) | 0x30)
+#define HID_USAGE_X_TILT               ((HID_USAGE_PAGE_DIGITIZER << 16) | 0x3d)
+#define HID_USAGE_Y_TILT               ((HID_USAGE_PAGE_DIGITIZER << 16) | 0x3e)
+#define HID_USAGE_FINGER               ((HID_USAGE_PAGE_DIGITIZER << 16) | 0x22)
+#define HID_USAGE_STYLUS               ((HID_USAGE_PAGE_DIGITIZER << 16) | 0x20)
+#define HID_USAGE_CONTACTMAX           ((HID_USAGE_PAGE_DIGITIZER << 16) | 0x55)
 #define HID_COLLECTION                 0xa1
 #define HID_COLLECTION_LOGICAL         0x02
 #define HID_COLLECTION_END             0xc0
 
-enum {
-       WCM_UNDEFINED = 0,
-       WCM_DESKTOP,
-       WCM_DIGITIZER,
-};
-
 struct hid_descriptor {
        struct usb_descriptor_header header;
        __le16   bcdHID;
@@ -305,7 +300,7 @@ static int wacom_parse_hid(struct usb_interface *intf,
        char limit = 0;
        /* result has to be defined as int for some devices */
        int result = 0, touch_max = 0;
-       int i = 0, usage = WCM_UNDEFINED, finger = 0, pen = 0;
+       int i = 0, page = 0, finger = 0, pen = 0;
        unsigned char *report;
 
        report = kzalloc(hid_desc->wDescriptorLength, GFP_KERNEL);
@@ -332,134 +327,121 @@ static int wacom_parse_hid(struct usb_interface *intf,
 
                switch (report[i]) {
                case HID_USAGE_PAGE:
-                       switch (report[i + 1]) {
-                       case HID_USAGE_PAGE_DIGITIZER:
-                               usage = WCM_DIGITIZER;
-                               i++;
-                               break;
-
-                       case HID_USAGE_PAGE_DESKTOP:
-                               usage = WCM_DESKTOP;
-                               i++;
-                               break;
-                       }
+                       page = report[i + 1];
+                       i++;
                        break;
 
                case HID_USAGE:
-                       switch (report[i + 1]) {
+                       switch (page << 16 | report[i + 1]) {
                        case HID_USAGE_X:
-                               if (usage == WCM_DESKTOP) {
-                                       if (finger) {
-                                               features->device_type = BTN_TOOL_FINGER;
-                                               /* touch device at least supports one touch point */
-                                               touch_max = 1;
-                                               switch (features->type) {
-                                               case TABLETPC2FG:
-                                                       features->pktlen = WACOM_PKGLEN_TPC2FG;
-                                                       break;
-
-                                               case MTSCREEN:
-                                               case WACOM_24HDT:
-                                                       features->pktlen = WACOM_PKGLEN_MTOUCH;
-                                                       break;
-
-                                               case MTTPC:
-                                                       features->pktlen = WACOM_PKGLEN_MTTPC;
-                                                       break;
-
-                                               case BAMBOO_PT:
-                                                       features->pktlen = WACOM_PKGLEN_BBTOUCH;
-                                                       break;
-
-                                               default:
-                                                       features->pktlen = WACOM_PKGLEN_GRAPHIRE;
-                                                       break;
-                                               }
-
-                                               switch (features->type) {
-                                               case BAMBOO_PT:
-                                                       features->x_phy =
-                                                               get_unaligned_le16(&report[i + 5]);
-                                                       features->x_max =
-                                                               get_unaligned_le16(&report[i + 8]);
-                                                       i += 15;
-                                                       break;
-
-                                               case WACOM_24HDT:
-                                                       features->x_max =
-                                                               get_unaligned_le16(&report[i + 3]);
-                                                       features->x_phy =
-                                                               get_unaligned_le16(&report[i + 8]);
-                                                       features->unit = report[i - 1];
-                                                       features->unitExpo = report[i - 3];
-                                                       i += 12;
-                                                       break;
-
-                                               default:
-                                                       features->x_max =
-                                                               get_unaligned_le16(&report[i + 3]);
-                                                       features->x_phy =
-                                                               get_unaligned_le16(&report[i + 6]);
-                                                       features->unit = report[i + 9];
-                                                       features->unitExpo = report[i + 11];
-                                                       i += 12;
-                                                       break;
-                                               }
-                                       } else if (pen) {
-                                               /* penabled only accepts exact bytes of data */
-                                               if (features->type >= TABLETPC)
-                                                       features->pktlen = WACOM_PKGLEN_GRAPHIRE;
-                                               features->device_type = BTN_TOOL_PEN;
+                               if (finger) {
+                                       features->device_type = BTN_TOOL_FINGER;
+                                       /* touch device at least supports one touch point */
+                                       touch_max = 1;
+                                       switch (features->type) {
+                                       case TABLETPC2FG:
+                                               features->pktlen = WACOM_PKGLEN_TPC2FG;
+                                               break;
+
+                                       case MTSCREEN:
+                                       case WACOM_24HDT:
+                                               features->pktlen = WACOM_PKGLEN_MTOUCH;
+                                               break;
+
+                                       case MTTPC:
+                                               features->pktlen = WACOM_PKGLEN_MTTPC;
+                                               break;
+
+                                       case BAMBOO_PT:
+                                               features->pktlen = WACOM_PKGLEN_BBTOUCH;
+                                               break;
+
+                                       default:
+                                               features->pktlen = WACOM_PKGLEN_GRAPHIRE;
+                                               break;
+                                       }
+
+                                       switch (features->type) {
+                                       case BAMBOO_PT:
+                                               features->x_phy =
+                                                       get_unaligned_le16(&report[i + 5]);
+                                               features->x_max =
+                                                       get_unaligned_le16(&report[i + 8]);
+                                               i += 15;
+                                               break;
+
+                                       case WACOM_24HDT:
                                                features->x_max =
                                                        get_unaligned_le16(&report[i + 3]);
-                                               i += 4;
+                                               features->x_phy =
+                                                       get_unaligned_le16(&report[i + 8]);
+                                               features->unit = report[i - 1];
+                                               features->unitExpo = report[i - 3];
+                                               i += 12;
+                                               break;
+
+                                       default:
+                                               features->x_max =
+                                                       get_unaligned_le16(&report[i + 3]);
+                                               features->x_phy =
+                                                       get_unaligned_le16(&report[i + 6]);
+                                               features->unit = report[i + 9];
+                                               features->unitExpo = report[i + 11];
+                                               i += 12;
+                                               break;
                                        }
+                               } else if (pen) {
+                                       /* penabled only accepts exact bytes of data */
+                                       if (features->type >= TABLETPC)
+                                               features->pktlen = WACOM_PKGLEN_GRAPHIRE;
+                                       features->device_type = BTN_TOOL_PEN;
+                                       features->x_max =
+                                               get_unaligned_le16(&report[i + 3]);
+                                       i += 4;
                                }
                                break;
 
                        case HID_USAGE_Y:
-                               if (usage == WCM_DESKTOP) {
-                                       if (finger) {
-                                               switch (features->type) {
-                                               case TABLETPC2FG:
-                                               case MTSCREEN:
-                                               case MTTPC:
-                                                       features->y_max =
-                                                               get_unaligned_le16(&report[i + 3]);
-                                                       features->y_phy =
-                                                               get_unaligned_le16(&report[i + 6]);
-                                                       i += 7;
-                                                       break;
-
-                                               case WACOM_24HDT:
-                                                       features->y_max =
-                                                               get_unaligned_le16(&report[i + 3]);
-                                                       features->y_phy =
-                                                               get_unaligned_le16(&report[i - 2]);
-                                                       i += 7;
-                                                       break;
-
-                                               case BAMBOO_PT:
-                                                       features->y_phy =
-                                                               get_unaligned_le16(&report[i + 3]);
-                                                       features->y_max =
-                                                               get_unaligned_le16(&report[i + 6]);
-                                                       i += 12;
-                                                       break;
-
-                                               default:
-                                                       features->y_max =
-                                                               features->x_max;
-                                                       features->y_phy =
-                                                               get_unaligned_le16(&report[i + 3]);
-                                                       i += 4;
-                                                       break;
-                                               }
-                                       } else if (pen) {
+                               if (finger) {
+                                       switch (features->type) {
+                                       case TABLETPC2FG:
+                                       case MTSCREEN:
+                                       case MTTPC:
+                                               features->y_max =
+                                                       get_unaligned_le16(&report[i + 3]);
+                                               features->y_phy =
+                                                       get_unaligned_le16(&report[i + 6]);
+                                               i += 7;
+                                               break;
+
+                                       case WACOM_24HDT:
+                                               features->y_max =
+                                                       get_unaligned_le16(&report[i + 3]);
+                                               features->y_phy =
+                                                       get_unaligned_le16(&report[i - 2]);
+                                               i += 7;
+                                               break;
+
+                                       case BAMBOO_PT:
+                                               features->y_phy =
+                                                       get_unaligned_le16(&report[i + 3]);
+                                               features->y_max =
+                                                       get_unaligned_le16(&report[i + 6]);
+                                               i += 12;
+                                               break;
+
+                                       default:
                                                features->y_max =
+                                                       features->x_max;
+                                               features->y_phy =
                                                        get_unaligned_le16(&report[i + 3]);
                                                i += 4;
+                                               break;
                                        }
+                               } else if (pen) {
+                                       features->y_max =
+                                               get_unaligned_le16(&report[i + 3]);
+                                       i += 4;
                                }
                                break;
 
@@ -484,12 +466,20 @@ static int wacom_parse_hid(struct usb_interface *intf,
                                        wacom_retrieve_report_data(intf, features);
                                i++;
                                break;
+
+                       case HID_USAGE_PRESSURE:
+                               if (pen) {
+                                       features->pressure_max =
+                                               get_unaligned_le16(&report[i + 3]);
+                                       i += 4;
+                               }
+                               break;
                        }
                        break;
 
                case HID_COLLECTION_END:
                        /* reset UsagePage and Finger */
-                       finger = usage = 0;
+                       finger = page = 0;
                        break;
 
                case HID_COLLECTION:
index 05f371df6c400a882621c13509f6a13a40eed077..4822c57a3756f4e3175fac3e1def90611028eb6d 100644 (file)
@@ -178,10 +178,9 @@ static int wacom_ptu_irq(struct wacom_wac *wacom)
 
 static int wacom_dtu_irq(struct wacom_wac *wacom)
 {
-       struct wacom_features *features = &wacom->features;
-       char *data = wacom->data;
+       unsigned char *data = wacom->data;
        struct input_dev *input = wacom->input;
-       int prox = data[1] & 0x20, pressure;
+       int prox = data[1] & 0x20;
 
        dev_dbg(input->dev.parent,
                "%s: received report #%d", __func__, data[0]);
@@ -198,10 +197,7 @@ static int wacom_dtu_irq(struct wacom_wac *wacom)
        input_report_key(input, BTN_STYLUS2, data[1] & 0x10);
        input_report_abs(input, ABS_X, le16_to_cpup((__le16 *)&data[2]));
        input_report_abs(input, ABS_Y, le16_to_cpup((__le16 *)&data[4]));
-       pressure = ((data[7] & 0x01) << 8) | data[6];
-       if (pressure < 0)
-               pressure = features->pressure_max + pressure + 1;
-       input_report_abs(input, ABS_PRESSURE, pressure);
+       input_report_abs(input, ABS_PRESSURE, ((data[7] & 0x01) << 8) | data[6]);
        input_report_key(input, BTN_TOUCH, data[1] & 0x05);
        if (!prox) /* out-prox */
                wacom->id[0] = 0;
@@ -906,7 +902,7 @@ static int int_dist(int x1, int y1, int x2, int y2)
 static int wacom_24hdt_irq(struct wacom_wac *wacom)
 {
        struct input_dev *input = wacom->input;
-       char *data = wacom->data;
+       unsigned char *data = wacom->data;
        int i;
        int current_num_contacts = data[61];
        int contacts_to_send = 0;
@@ -959,7 +955,7 @@ static int wacom_24hdt_irq(struct wacom_wac *wacom)
 static int wacom_mt_touch(struct wacom_wac *wacom)
 {
        struct input_dev *input = wacom->input;
-       char *data = wacom->data;
+       unsigned char *data = wacom->data;
        int i;
        int current_num_contacts = data[2];
        int contacts_to_send = 0;
@@ -1038,7 +1034,7 @@ static int wacom_tpc_mt_touch(struct wacom_wac *wacom)
 
 static int wacom_tpc_single_touch(struct wacom_wac *wacom, size_t len)
 {
-       char *data = wacom->data;
+       unsigned char *data = wacom->data;
        struct input_dev *input = wacom->input;
        bool prox;
        int x = 0, y = 0;
@@ -1074,10 +1070,8 @@ static int wacom_tpc_single_touch(struct wacom_wac *wacom, size_t len)
 
 static int wacom_tpc_pen(struct wacom_wac *wacom)
 {
-       struct wacom_features *features = &wacom->features;
-       char *data = wacom->data;
+       unsigned char *data = wacom->data;
        struct input_dev *input = wacom->input;
-       int pressure;
        bool prox = data[1] & 0x20;
 
        if (!wacom->shared->stylus_in_proximity) /* first in prox */
@@ -1093,10 +1087,7 @@ static int wacom_tpc_pen(struct wacom_wac *wacom)
                input_report_key(input, BTN_STYLUS2, data[1] & 0x10);
                input_report_abs(input, ABS_X, le16_to_cpup((__le16 *)&data[2]));
                input_report_abs(input, ABS_Y, le16_to_cpup((__le16 *)&data[4]));
-               pressure = ((data[7] & 0x01) << 8) | data[6];
-               if (pressure < 0)
-                       pressure = features->pressure_max + pressure + 1;
-               input_report_abs(input, ABS_PRESSURE, pressure);
+               input_report_abs(input, ABS_PRESSURE, ((data[7] & 0x03) << 8) | data[6]);
                input_report_key(input, BTN_TOUCH, data[1] & 0x05);
                input_report_key(input, wacom->tool[0], prox);
                return 1;
@@ -1107,7 +1098,7 @@ static int wacom_tpc_pen(struct wacom_wac *wacom)
 
 static int wacom_tpc_irq(struct wacom_wac *wacom, size_t len)
 {
-       char *data = wacom->data;
+       unsigned char *data = wacom->data;
 
        dev_dbg(wacom->input->dev.parent,
                "%s: received report #%d\n", __func__, data[0]);
@@ -1838,7 +1829,7 @@ int wacom_setup_input_capabilities(struct input_dev *input_dev,
        case DTU:
                if (features->type == DTUS) {
                        input_set_capability(input_dev, EV_MSC, MSC_SERIAL);
-                       for (i = 0; i < 3; i++)
+                       for (i = 0; i < 4; i++)
                                __set_bit(BTN_0 + i, input_dev->keybit);
                }
                __set_bit(BTN_TOOL_PEN, input_dev->keybit);
index 45a06e495ed25702831441da6be558fa88491671..7f8aa981500d88c10d8b23cc19f489befe7bedd6 100644 (file)
@@ -425,7 +425,7 @@ static int ads7845_read12_ser(struct device *dev, unsigned command)
 name ## _show(struct device *dev, struct device_attribute *attr, char *buf) \
 { \
        struct ads7846 *ts = dev_get_drvdata(dev); \
-       ssize_t v = ads7846_read12_ser(dev, \
+       ssize_t v = ads7846_read12_ser(&ts->spi->dev, \
                        READ_12BIT_SER(var)); \
        if (v < 0) \
                return v; \
index 8b89e33a89fe99057d99e322ce2c287b41c0eb2f..647c3c7fd7428f31dd2b313a1fc8b29ac33b482c 100644 (file)
@@ -1381,7 +1381,7 @@ static int arm_smmu_alloc_init_pmd(struct arm_smmu_device *smmu, pud_t *pud,
 
        do {
                next = pmd_addr_end(addr, end);
-               ret = arm_smmu_alloc_init_pte(smmu, pmd, addr, end, pfn,
+               ret = arm_smmu_alloc_init_pte(smmu, pmd, addr, next, pfn,
                                              prot, stage);
                phys += next - addr;
        } while (pmd++, addr = next, addr < end);
@@ -1499,7 +1499,7 @@ static size_t arm_smmu_unmap(struct iommu_domain *domain, unsigned long iova,
 
        ret = arm_smmu_handle_mapping(smmu_domain, iova, 0, size, 0);
        arm_smmu_tlb_inv_context(&smmu_domain->root_cfg);
-       return ret ? ret : size;
+       return ret ? 0 : size;
 }
 
 static phys_addr_t arm_smmu_iova_to_phys(struct iommu_domain *domain,
index f445c10df8dfd2beb53b8fb4628d7094d0e17b5a..39f8b717fe8482f6e757bc7f9064187923b90cca 100644 (file)
@@ -152,7 +152,8 @@ dmar_alloc_pci_notify_info(struct pci_dev *dev, unsigned long event)
        info->seg = pci_domain_nr(dev->bus);
        info->level = level;
        if (event == BUS_NOTIFY_ADD_DEVICE) {
-               for (tmp = dev, level--; tmp; tmp = tmp->bus->self) {
+               for (tmp = dev; tmp; tmp = tmp->bus->self) {
+                       level--;
                        info->path[level].device = PCI_SLOT(tmp->devfn);
                        info->path[level].function = PCI_FUNC(tmp->devfn);
                        if (pci_is_root_bus(tmp->bus))
index 69fa7da5e48beba40a9595f67117505efc4e069b..f256ffc02e29df18ce8c43266fafe68b1971beb0 100644 (file)
@@ -1009,11 +1009,13 @@ static struct page *dma_pte_list_pagetables(struct dmar_domain *domain,
        if (level == 1)
                return freelist;
 
-       for (pte = page_address(pg); !first_pte_in_page(pte); pte++) {
+       pte = page_address(pg);
+       do {
                if (dma_pte_present(pte) && !dma_pte_superpage(pte))
                        freelist = dma_pte_list_pagetables(domain, level - 1,
                                                           pte, freelist);
-       }
+               pte++;
+       } while (!first_pte_in_page(pte));
 
        return freelist;
 }
@@ -2235,7 +2237,9 @@ static struct dmar_domain *get_domain_for_dev(struct device *dev, int gaw)
                                bridge_devfn = dev_tmp->devfn;
                        }
                        spin_lock_irqsave(&device_domain_lock, flags);
-                       info = dmar_search_domain_by_dev_info(segment, bus, devfn);
+                       info = dmar_search_domain_by_dev_info(segment,
+                                                             bridge_bus,
+                                                             bridge_devfn);
                        if (info) {
                                iommu = info->iommu;
                                domain = info->domain;
index 41be897df8d5521250d79dee5362c08fe0f80067..3899ba7821c5e78d4496c29ad3fba2b8b4ffcad9 100644 (file)
@@ -41,6 +41,7 @@
 #define ARMADA_370_XP_INT_SET_ENABLE_OFFS      (0x30)
 #define ARMADA_370_XP_INT_CLEAR_ENABLE_OFFS    (0x34)
 #define ARMADA_370_XP_INT_SOURCE_CTL(irq)      (0x100 + irq*4)
+#define ARMADA_370_XP_INT_SOURCE_CPU_MASK      0xF
 
 #define ARMADA_370_XP_CPU_INTACK_OFFS          (0x44)
 #define ARMADA_375_PPI_CAUSE                   (0x10)
@@ -132,8 +133,7 @@ static int armada_370_xp_setup_msi_irq(struct msi_chip *chip,
                                       struct msi_desc *desc)
 {
        struct msi_msg msg;
-       irq_hw_number_t hwirq;
-       int virq;
+       int virq, hwirq;
 
        hwirq = armada_370_xp_alloc_msi();
        if (hwirq < 0)
@@ -159,8 +159,19 @@ static void armada_370_xp_teardown_msi_irq(struct msi_chip *chip,
                                           unsigned int irq)
 {
        struct irq_data *d = irq_get_irq_data(irq);
+       unsigned long hwirq = d->hwirq;
+
        irq_dispose_mapping(irq);
-       armada_370_xp_free_msi(d->hwirq);
+       armada_370_xp_free_msi(hwirq);
+}
+
+static int armada_370_xp_check_msi_device(struct msi_chip *chip, struct pci_dev *dev,
+                                         int nvec, int type)
+{
+       /* We support MSI, but not MSI-X */
+       if (type == PCI_CAP_ID_MSI)
+               return 0;
+       return -EINVAL;
 }
 
 static struct irq_chip armada_370_xp_msi_irq_chip = {
@@ -201,6 +212,7 @@ static int armada_370_xp_msi_init(struct device_node *node,
 
        msi_chip->setup_irq = armada_370_xp_setup_msi_irq;
        msi_chip->teardown_irq = armada_370_xp_teardown_msi_irq;
+       msi_chip->check_device = armada_370_xp_check_msi_device;
        msi_chip->of_node = node;
 
        armada_370_xp_msi_domain =
@@ -244,35 +256,18 @@ static DEFINE_RAW_SPINLOCK(irq_controller_lock);
 static int armada_xp_set_affinity(struct irq_data *d,
                                  const struct cpumask *mask_val, bool force)
 {
-       unsigned long reg;
-       unsigned long new_mask = 0;
-       unsigned long online_mask = 0;
-       unsigned long count = 0;
        irq_hw_number_t hwirq = irqd_to_hwirq(d);
+       unsigned long reg, mask;
        int cpu;
 
-       for_each_cpu(cpu, mask_val) {
-               new_mask |= 1 << cpu_logical_map(cpu);
-               count++;
-       }
-
-       /*
-        * Forbid mutlicore interrupt affinity
-        * This is required since the MPIC HW doesn't limit
-        * several CPUs from acknowledging the same interrupt.
-        */
-       if (count > 1)
-               return -EINVAL;
-
-       for_each_cpu(cpu, cpu_online_mask)
-               online_mask |= 1 << cpu_logical_map(cpu);
+       /* Select a single core from the affinity mask which is online */
+       cpu = cpumask_any_and(mask_val, cpu_online_mask);
+       mask = 1UL << cpu_logical_map(cpu);
 
        raw_spin_lock(&irq_controller_lock);
-
        reg = readl(main_int_base + ARMADA_370_XP_INT_SOURCE_CTL(hwirq));
-       reg = (reg & (~online_mask)) | new_mask;
+       reg = (reg & (~ARMADA_370_XP_INT_SOURCE_CPU_MASK)) | mask;
        writel(reg, main_int_base + ARMADA_370_XP_INT_SOURCE_CTL(hwirq));
-
        raw_spin_unlock(&irq_controller_lock);
 
        return 0;
@@ -494,15 +489,6 @@ static int __init armada_370_xp_mpic_of_init(struct device_node *node,
 
 #ifdef CONFIG_SMP
        armada_xp_mpic_smp_cpu_init();
-
-       /*
-        * Set the default affinity from all CPUs to the boot cpu.
-        * This is required since the MPIC doesn't limit several CPUs
-        * from acknowledging the same interrupt.
-        */
-       cpumask_clear(irq_default_affinity);
-       cpumask_set_cpu(smp_processor_id(), irq_default_affinity);
-
 #endif
 
        armada_370_xp_msi_init(node, main_int_res.start);
index fc817d28d1fe50341bc4c9fe0d4de188f599d947..3d15d16a7088d2d886ef769f96534d896ded1b73 100644 (file)
@@ -107,7 +107,7 @@ static int __init crossbar_of_init(struct device_node *node)
        int i, size, max, reserved = 0, entry;
        const __be32 *irqsr;
 
-       cb = kzalloc(sizeof(struct cb_device *), GFP_KERNEL);
+       cb = kzalloc(sizeof(*cb), GFP_KERNEL);
 
        if (!cb)
                return -ENOMEM;
index 4300b6606f5e3276c11656ff29506b665e9a2a87..57d165e026f43ac4ba3f0ba0a0f422025ad2b117 100644 (file)
@@ -246,10 +246,14 @@ static int gic_set_affinity(struct irq_data *d, const struct cpumask *mask_val,
                            bool force)
 {
        void __iomem *reg = gic_dist_base(d) + GIC_DIST_TARGET + (gic_irq(d) & ~3);
-       unsigned int shift = (gic_irq(d) % 4) * 8;
-       unsigned int cpu = cpumask_any_and(mask_val, cpu_online_mask);
+       unsigned int cpu, shift = (gic_irq(d) % 4) * 8;
        u32 val, mask, bit;
 
+       if (!force)
+               cpu = cpumask_any_and(mask_val, cpu_online_mask);
+       else
+               cpu = cpumask_first(mask_val);
+
        if (cpu >= NR_GIC_CPU_IF || cpu >= nr_cpu_ids)
                return -EINVAL;
 
index 414dbf6da89afd5890c73785cbcb41f3ab903e54..fc9f9d03fa13b879ba382d079cb35a1108b19d0d 100644 (file)
@@ -197,25 +197,6 @@ typedef struct _hfc4s8s_hw {
 
 
 
-/***************************/
-/* inline function defines */
-/***************************/
-#ifdef HISAX_HFC4S8S_PCIMEM    /* inline functions memory mapped */
-
-/* memory write and dummy IO read to avoid PCI byte merge problems */
-#define Write_hfc8(a, b, c) {(*((volatile u_char *)(a->membase + b)) = c); inb(a->iobase + 4);}
-/* memory write without dummy IO access for fifo data access */
-#define fWrite_hfc8(a, b, c) (*((volatile u_char *)(a->membase + b)) = c)
-#define Read_hfc8(a, b) (*((volatile u_char *)(a->membase + b)))
-#define Write_hfc16(a, b, c) (*((volatile unsigned short *)(a->membase + b)) = c)
-#define Read_hfc16(a, b) (*((volatile unsigned short *)(a->membase + b)))
-#define Write_hfc32(a, b, c) (*((volatile unsigned long *)(a->membase + b)) = c)
-#define Read_hfc32(a, b) (*((volatile unsigned long *)(a->membase + b)))
-#define wait_busy(a) {while ((Read_hfc8(a, R_STATUS) & M_BUSY));}
-#define PCI_ENA_MEMIO  0x03
-
-#else
-
 /* inline functions io mapped */
 static inline void
 SetRegAddr(hfc4s8s_hw *a, u_char b)
@@ -306,8 +287,6 @@ wait_busy(hfc4s8s_hw *a)
 
 #define PCI_ENA_REGIO  0x01
 
-#endif                         /* HISAX_HFC4S8S_PCIMEM */
-
 /******************************************************/
 /* function to read critical counter registers that   */
 /* may be updated by the chip during read             */
@@ -724,26 +703,15 @@ rx_d_frame(struct hfc4s8s_l1 *l1p, int ech)
                                return;
                        } else {
                                /* read errornous D frame */
-
-#ifndef HISAX_HFC4S8S_PCIMEM
                                SetRegAddr(l1p->hw, A_FIFO_DATA0);
-#endif
 
                                while (z1 >= 4) {
-#ifdef HISAX_HFC4S8S_PCIMEM
-                                       Read_hfc32(l1p->hw, A_FIFO_DATA0);
-#else
                                        fRead_hfc32(l1p->hw);
-#endif
                                        z1 -= 4;
                                }
 
                                while (z1--)
-#ifdef HISAX_HFC4S8S_PCIMEM
-                                       Read_hfc8(l1p->hw, A_FIFO_DATA0);
-#else
-                               fRead_hfc8(l1p->hw);
-#endif
+                                       fRead_hfc8(l1p->hw);
 
                                Write_hfc8(l1p->hw, A_INC_RES_FIFO, 1);
                                wait_busy(l1p->hw);
@@ -753,27 +721,16 @@ rx_d_frame(struct hfc4s8s_l1 *l1p, int ech)
 
                cp = skb->data;
 
-#ifndef HISAX_HFC4S8S_PCIMEM
                SetRegAddr(l1p->hw, A_FIFO_DATA0);
-#endif
 
                while (z1 >= 4) {
-#ifdef HISAX_HFC4S8S_PCIMEM
-                       *((unsigned long *) cp) =
-                               Read_hfc32(l1p->hw, A_FIFO_DATA0);
-#else
                        *((unsigned long *) cp) = fRead_hfc32(l1p->hw);
-#endif
                        cp += 4;
                        z1 -= 4;
                }
 
                while (z1--)
-#ifdef HISAX_HFC4S8S_PCIMEM
-                       *cp++ = Read_hfc8(l1p->hw, A_FIFO_DATA0);
-#else
-               *cp++ = fRead_hfc8(l1p->hw);
-#endif
+                       *cp++ = fRead_hfc8(l1p->hw);
 
                Write_hfc8(l1p->hw, A_INC_RES_FIFO, 1); /* increment f counter */
                wait_busy(l1p->hw);
@@ -859,28 +816,17 @@ rx_b_frame(struct hfc4s8s_btype *bch)
                        wait_busy(l1->hw);
                        return;
                }
-#ifndef HISAX_HFC4S8S_PCIMEM
                SetRegAddr(l1->hw, A_FIFO_DATA0);
-#endif
 
                while (z1 >= 4) {
-#ifdef HISAX_HFC4S8S_PCIMEM
-                       *((unsigned long *) bch->rx_ptr) =
-                               Read_hfc32(l1->hw, A_FIFO_DATA0);
-#else
                        *((unsigned long *) bch->rx_ptr) =
                                fRead_hfc32(l1->hw);
-#endif
                        bch->rx_ptr += 4;
                        z1 -= 4;
                }
 
                while (z1--)
-#ifdef HISAX_HFC4S8S_PCIMEM
-                       *(bch->rx_ptr++) = Read_hfc8(l1->hw, A_FIFO_DATA0);
-#else
-               *(bch->rx_ptr++) = fRead_hfc8(l1->hw);
-#endif
+                       *(bch->rx_ptr++) = fRead_hfc8(l1->hw);
 
                if (hdlc_complete) {
                        /* increment f counter */
@@ -940,29 +886,17 @@ tx_d_frame(struct hfc4s8s_l1 *l1p)
        if ((skb = skb_dequeue(&l1p->d_tx_queue))) {
                cp = skb->data;
                cnt = skb->len;
-#ifndef HISAX_HFC4S8S_PCIMEM
                SetRegAddr(l1p->hw, A_FIFO_DATA0);
-#endif
 
                while (cnt >= 4) {
-#ifdef HISAX_HFC4S8S_PCIMEM
-                       fWrite_hfc32(l1p->hw, A_FIFO_DATA0,
-                                    *(unsigned long *) cp);
-#else
                        SetRegAddr(l1p->hw, A_FIFO_DATA0);
                        fWrite_hfc32(l1p->hw, *(unsigned long *) cp);
-#endif
                        cp += 4;
                        cnt -= 4;
                }
 
-#ifdef HISAX_HFC4S8S_PCIMEM
-               while (cnt--)
-                       fWrite_hfc8(l1p->hw, A_FIFO_DATA0, *cp++);
-#else
                while (cnt--)
                        fWrite_hfc8(l1p->hw, *cp++);
-#endif
 
                l1p->tx_cnt = skb->truesize;
                Write_hfc8(l1p->hw, A_INC_RES_FIFO, 1); /* increment f counter */
@@ -1037,26 +971,15 @@ tx_b_frame(struct hfc4s8s_btype *bch)
                cp = skb->data + bch->tx_cnt;
                bch->tx_cnt += cnt;
 
-#ifndef HISAX_HFC4S8S_PCIMEM
                SetRegAddr(l1->hw, A_FIFO_DATA0);
-#endif
                while (cnt >= 4) {
-#ifdef HISAX_HFC4S8S_PCIMEM
-                       fWrite_hfc32(l1->hw, A_FIFO_DATA0,
-                                    *(unsigned long *) cp);
-#else
                        fWrite_hfc32(l1->hw, *(unsigned long *) cp);
-#endif
                        cp += 4;
                        cnt -= 4;
                }
 
                while (cnt--)
-#ifdef HISAX_HFC4S8S_PCIMEM
-                       fWrite_hfc8(l1->hw, A_FIFO_DATA0, *cp++);
-#else
-               fWrite_hfc8(l1->hw, *cp++);
-#endif
+                       fWrite_hfc8(l1->hw, *cp++);
 
                if (bch->tx_cnt >= skb->len) {
                        if (bch->mode == L1_MODE_HDLC) {
@@ -1281,10 +1204,8 @@ hfc4s8s_interrupt(int intno, void *dev_id)
        if (!hw || !(hw->mr.r_irq_ctrl & M_GLOB_IRQ_EN))
                return IRQ_NONE;
 
-#ifndef        HISAX_HFC4S8S_PCIMEM
        /* read current selected regsister */
        old_ioreg = GetRegAddr(hw);
-#endif
 
        /* Layer 1 State change */
        hw->mr.r_irq_statech |=
@@ -1292,9 +1213,7 @@ hfc4s8s_interrupt(int intno, void *dev_id)
        if (!
            (b = (Read_hfc8(hw, R_STATUS) & (M_MISC_IRQSTA | M_FR_IRQSTA)))
            && !hw->mr.r_irq_statech) {
-#ifndef        HISAX_HFC4S8S_PCIMEM
                SetRegAddr(hw, old_ioreg);
-#endif
                return IRQ_NONE;
        }
 
@@ -1322,9 +1241,7 @@ hfc4s8s_interrupt(int intno, void *dev_id)
        /* queue the request to allow other cards to interrupt */
        schedule_work(&hw->tqueue);
 
-#ifndef        HISAX_HFC4S8S_PCIMEM
        SetRegAddr(hw, old_ioreg);
-#endif
        return IRQ_HANDLED;
 }                              /* hfc4s8s_interrupt */
 
@@ -1471,13 +1388,8 @@ static void
 release_pci_ports(hfc4s8s_hw *hw)
 {
        pci_write_config_word(hw->pdev, PCI_COMMAND, 0);
-#ifdef HISAX_HFC4S8S_PCIMEM
-       if (hw->membase)
-               iounmap((void *) hw->membase);
-#else
        if (hw->iobase)
                release_region(hw->iobase, 8);
-#endif
 }
 
 /*****************************************/
@@ -1486,11 +1398,7 @@ release_pci_ports(hfc4s8s_hw *hw)
 static void
 enable_pci_ports(hfc4s8s_hw *hw)
 {
-#ifdef HISAX_HFC4S8S_PCIMEM
-       pci_write_config_word(hw->pdev, PCI_COMMAND, PCI_ENA_MEMIO);
-#else
        pci_write_config_word(hw->pdev, PCI_COMMAND, PCI_ENA_REGIO);
-#endif
 }
 
 /*************************************/
@@ -1561,15 +1469,9 @@ setup_instance(hfc4s8s_hw *hw)
                       hw->irq);
                goto out;
        }
-#ifdef HISAX_HFC4S8S_PCIMEM
-       printk(KERN_INFO
-              "HFC-4S/8S: found PCI card at membase 0x%p, irq %d\n",
-              hw->hw_membase, hw->irq);
-#else
        printk(KERN_INFO
               "HFC-4S/8S: found PCI card at iobase 0x%x, irq %d\n",
               hw->iobase, hw->irq);
-#endif
 
        hfc_hardware_enable(hw, 1, 0);
 
@@ -1614,17 +1516,12 @@ hfc4s8s_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
        hw->irq = pdev->irq;
        hw->iobase = pci_resource_start(pdev, 0);
 
-#ifdef HISAX_HFC4S8S_PCIMEM
-       hw->hw_membase = (u_char *) pci_resource_start(pdev, 1);
-       hw->membase = ioremap((ulong) hw->hw_membase, 256);
-#else
        if (!request_region(hw->iobase, 8, hw->card_name)) {
                printk(KERN_INFO
                       "HFC-4S/8S: failed to request address space at 0x%04x\n",
                       hw->iobase);
                goto out;
        }
-#endif
 
        pci_set_drvdata(pdev, hw);
        err = setup_instance(hw);
index 51dae9167238a3ed1ff972508a02f338de6702a0..96d1df05044fb48ffceb988dd90540db9f125cdd 100644 (file)
@@ -425,7 +425,7 @@ afterXPR:
                                if (cs->debug & L1_DEB_MONITOR)
                                        debugl1(cs, "ICC %02x -> MOX1", cs->dc.icc.mon_tx[cs->dc.icc.mon_txp - 1]);
                        }
-               AfterMOX1:
+               AfterMOX1: ;
 #endif
                }
        }
index 2c0d2c2bf94648e273b7614a65e1bc09976663e8..9f454d76cc060984317e31310b60e34080e0c6c5 100644 (file)
@@ -287,11 +287,9 @@ l1oip_socket_send(struct l1oip *hc, u8 localcodec, u8 channel, u32 chanmask,
        p = frame;
 
        /* restart timer */
-       if ((int)(hc->keep_tl.expires-jiffies) < 5 * HZ) {
-               del_timer(&hc->keep_tl);
-               hc->keep_tl.expires = jiffies + L1OIP_KEEPALIVE * HZ;
-               add_timer(&hc->keep_tl);
-       } else
+       if (time_before(hc->keep_tl.expires, jiffies + 5 * HZ))
+               mod_timer(&hc->keep_tl, jiffies + L1OIP_KEEPALIVE * HZ);
+       else
                hc->keep_tl.expires = jiffies + L1OIP_KEEPALIVE * HZ;
 
        if (debug & DEBUG_L1OIP_MSG)
@@ -621,11 +619,9 @@ multiframe:
                goto multiframe;
 
        /* restart timer */
-       if ((int)(hc->timeout_tl.expires-jiffies) < 5 * HZ || !hc->timeout_on) {
+       if (time_before(hc->timeout_tl.expires, jiffies + 5 * HZ) || !hc->timeout_on) {
                hc->timeout_on = 1;
-               del_timer(&hc->timeout_tl);
-               hc->timeout_tl.expires = jiffies + L1OIP_TIMEOUT * HZ;
-               add_timer(&hc->timeout_tl);
+               mod_timer(&hc->timeout_tl, jiffies + L1OIP_TIMEOUT * HZ);
        } else /* only adjust timer */
                hc->timeout_tl.expires = jiffies + L1OIP_TIMEOUT * HZ;
 
index 1bf4a71919ec73957a00550dec49b3a3b3a1292c..9380be7b18954b9308ed42abe5fafa2f87c0f76a 100644 (file)
@@ -2488,6 +2488,7 @@ static int cache_map(struct dm_target *ti, struct bio *bio)
 
                } else {
                        inc_hit_counter(cache, bio);
+                       pb->all_io_entry = dm_deferred_entry_inc(cache->all_io_ds);
 
                        if (bio_data_dir(bio) == WRITE && writethrough_mode(&cache->features) &&
                            !is_dirty(cache, lookup_result.cblock))
index 53728be84dee35ac8dfabbf48087919841049f1a..13abade76ad9bbd65c83c67d35f075506b17b63f 100644 (file)
@@ -232,6 +232,13 @@ struct thin_c {
        struct bio_list deferred_bio_list;
        struct bio_list retry_on_resume_list;
        struct rb_root sort_bio_list; /* sorted list of deferred bios */
+
+       /*
+        * Ensures the thin is not destroyed until the worker has finished
+        * iterating the active_thins list.
+        */
+       atomic_t refcount;
+       struct completion can_destroy;
 };
 
 /*----------------------------------------------------------------*/
@@ -1486,6 +1493,45 @@ static void process_thin_deferred_bios(struct thin_c *tc)
        blk_finish_plug(&plug);
 }
 
+static void thin_get(struct thin_c *tc);
+static void thin_put(struct thin_c *tc);
+
+/*
+ * We can't hold rcu_read_lock() around code that can block.  So we
+ * find a thin with the rcu lock held; bump a refcount; then drop
+ * the lock.
+ */
+static struct thin_c *get_first_thin(struct pool *pool)
+{
+       struct thin_c *tc = NULL;
+
+       rcu_read_lock();
+       if (!list_empty(&pool->active_thins)) {
+               tc = list_entry_rcu(pool->active_thins.next, struct thin_c, list);
+               thin_get(tc);
+       }
+       rcu_read_unlock();
+
+       return tc;
+}
+
+static struct thin_c *get_next_thin(struct pool *pool, struct thin_c *tc)
+{
+       struct thin_c *old_tc = tc;
+
+       rcu_read_lock();
+       list_for_each_entry_continue_rcu(tc, &pool->active_thins, list) {
+               thin_get(tc);
+               thin_put(old_tc);
+               rcu_read_unlock();
+               return tc;
+       }
+       thin_put(old_tc);
+       rcu_read_unlock();
+
+       return NULL;
+}
+
 static void process_deferred_bios(struct pool *pool)
 {
        unsigned long flags;
@@ -1493,10 +1539,11 @@ static void process_deferred_bios(struct pool *pool)
        struct bio_list bios;
        struct thin_c *tc;
 
-       rcu_read_lock();
-       list_for_each_entry_rcu(tc, &pool->active_thins, list)
+       tc = get_first_thin(pool);
+       while (tc) {
                process_thin_deferred_bios(tc);
-       rcu_read_unlock();
+               tc = get_next_thin(pool, tc);
+       }
 
        /*
         * If there are any deferred flush bios, we must commit
@@ -1578,7 +1625,7 @@ static void noflush_work(struct thin_c *tc, void (*fn)(struct work_struct *))
 {
        struct noflush_work w;
 
-       INIT_WORK(&w.worker, fn);
+       INIT_WORK_ONSTACK(&w.worker, fn);
        w.tc = tc;
        atomic_set(&w.complete, 0);
        init_waitqueue_head(&w.wait);
@@ -3061,11 +3108,25 @@ static struct target_type pool_target = {
 /*----------------------------------------------------------------
  * Thin target methods
  *--------------------------------------------------------------*/
+static void thin_get(struct thin_c *tc)
+{
+       atomic_inc(&tc->refcount);
+}
+
+static void thin_put(struct thin_c *tc)
+{
+       if (atomic_dec_and_test(&tc->refcount))
+               complete(&tc->can_destroy);
+}
+
 static void thin_dtr(struct dm_target *ti)
 {
        struct thin_c *tc = ti->private;
        unsigned long flags;
 
+       thin_put(tc);
+       wait_for_completion(&tc->can_destroy);
+
        spin_lock_irqsave(&tc->pool->lock, flags);
        list_del_rcu(&tc->list);
        spin_unlock_irqrestore(&tc->pool->lock, flags);
@@ -3101,6 +3162,7 @@ static int thin_ctr(struct dm_target *ti, unsigned argc, char **argv)
        struct thin_c *tc;
        struct dm_dev *pool_dev, *origin_dev;
        struct mapped_device *pool_md;
+       unsigned long flags;
 
        mutex_lock(&dm_thin_pool_table.mutex);
 
@@ -3191,9 +3253,12 @@ static int thin_ctr(struct dm_target *ti, unsigned argc, char **argv)
 
        mutex_unlock(&dm_thin_pool_table.mutex);
 
-       spin_lock(&tc->pool->lock);
+       atomic_set(&tc->refcount, 1);
+       init_completion(&tc->can_destroy);
+
+       spin_lock_irqsave(&tc->pool->lock, flags);
        list_add_tail_rcu(&tc->list, &tc->pool->active_thins);
-       spin_unlock(&tc->pool->lock);
+       spin_unlock_irqrestore(&tc->pool->lock, flags);
        /*
         * This synchronize_rcu() call is needed here otherwise we risk a
         * wake_worker() call finding no bios to process (because the newly
index 796007a5e0e1a4b6e83b0871c1fca1ef8c0c461f..7a7bab8947ae3485d31c132cb3398251c7d507cf 100644 (file)
@@ -330,15 +330,17 @@ test_block_hash:
                                return r;
                        }
                }
-
                todo = 1 << v->data_dev_block_bits;
-               while (io->iter.bi_size) {
+               do {
                        u8 *page;
+                       unsigned len;
                        struct bio_vec bv = bio_iter_iovec(bio, io->iter);
 
                        page = kmap_atomic(bv.bv_page);
-                       r = crypto_shash_update(desc, page + bv.bv_offset,
-                                               bv.bv_len);
+                       len = bv.bv_len;
+                       if (likely(len >= todo))
+                               len = todo;
+                       r = crypto_shash_update(desc, page + bv.bv_offset, len);
                        kunmap_atomic(page);
 
                        if (r < 0) {
@@ -346,8 +348,9 @@ test_block_hash:
                                return r;
                        }
 
-                       bio_advance_iter(bio, &io->iter, bv.bv_len);
-               }
+                       bio_advance_iter(bio, &io->iter, len);
+                       todo -= len;
+               } while (todo);
 
                if (!v->version) {
                        r = crypto_shash_update(desc, v->salt, v->salt_size);
index c137abfa0c543dd33f15e3888ab4590900733bba..20f1655e6d7595c328b5499476f4dde7773cccd0 100644 (file)
@@ -56,7 +56,7 @@ config VIDEO_VIU
 
 config VIDEO_TIMBERDALE
        tristate "Support for timberdale Video In/LogiWIN"
-       depends on VIDEO_V4L2 && I2C && DMADEVICES
+       depends on MFD_TIMBERDALE && VIDEO_V4L2 && I2C && DMADEVICES
        select DMA_ENGINE
        select TIMB_DMA
        select VIDEO_ADV7180
index 7ff473c871a9a249bd02007c2176ec5c471d7626..8d659e6a1b4c0899e32706b8bfa7fe3270ff715a 100644 (file)
@@ -431,7 +431,7 @@ int ubiblock_create(struct ubi_volume_info *vi)
         * Create one workqueue per volume (per registered block device).
         * Rembember workqueues are cheap, they're not threads.
         */
-       dev->wq = alloc_workqueue(gd->disk_name, 0, 0);
+       dev->wq = alloc_workqueue("%s", 0, 0, gd->disk_name);
        if (!dev->wq)
                goto out_free_queue;
        INIT_WORK(&dev->work, ubiblock_do_work);
index 02317c1c02385914c94175fa8757089c677e2b94..0f3425dac91046300f93587d4f341e080c98e322 100644 (file)
@@ -671,6 +671,8 @@ static struct ubi_wl_entry *get_peb_for_wl(struct ubi_device *ubi)
 
        e = find_wl_entry(ubi, &ubi->free, WL_FREE_MAX_DIFF);
        self_check_in_wl_tree(ubi, e, &ubi->free);
+       ubi->free_count--;
+       ubi_assert(ubi->free_count >= 0);
        rb_erase(&e->u.rb, &ubi->free);
 
        return e;
@@ -684,6 +686,9 @@ int ubi_wl_get_peb(struct ubi_device *ubi)
        peb = __wl_get_peb(ubi);
        spin_unlock(&ubi->wl_lock);
 
+       if (peb < 0)
+               return peb;
+
        err = ubi_self_check_all_ff(ubi, peb, ubi->vid_hdr_aloffset,
                                    ubi->peb_size - ubi->vid_hdr_aloffset);
        if (err) {
@@ -1068,6 +1073,7 @@ static int wear_leveling_worker(struct ubi_device *ubi, struct ubi_work *wrk,
 
                        /* Give the unused PEB back */
                        wl_tree_add(e2, &ubi->free);
+                       ubi->free_count++;
                        goto out_cancel;
                }
                self_check_in_wl_tree(ubi, e1, &ubi->used);
index b667a51ed21517a3ee6cf2be6ab4c7e306a713a2..0dfeaf5da3f2c914e2fd43e40d4bf4f3d8aff079 100644 (file)
@@ -157,7 +157,7 @@ static inline struct aggregator *__get_first_agg(struct port *port)
 
        rcu_read_lock();
        first_slave = bond_first_slave_rcu(bond);
-       agg = first_slave ? &(SLAVE_AD_INFO(first_slave).aggregator) : NULL;
+       agg = first_slave ? &(SLAVE_AD_INFO(first_slave)->aggregator) : NULL;
        rcu_read_unlock();
 
        return agg;
@@ -192,7 +192,7 @@ static inline void __enable_port(struct port *port)
 {
        struct slave *slave = port->slave;
 
-       if ((slave->link == BOND_LINK_UP) && IS_UP(slave->dev))
+       if ((slave->link == BOND_LINK_UP) && bond_slave_is_up(slave))
                bond_set_slave_active_flags(slave, BOND_SLAVE_NOTIFY_LATER);
 }
 
@@ -241,7 +241,7 @@ static inline int __check_agg_selection_timer(struct port *port)
  */
 static inline void __get_state_machine_lock(struct port *port)
 {
-       spin_lock_bh(&(SLAVE_AD_INFO(port->slave).state_machine_lock));
+       spin_lock_bh(&(SLAVE_AD_INFO(port->slave)->state_machine_lock));
 }
 
 /**
@@ -250,7 +250,7 @@ static inline void __get_state_machine_lock(struct port *port)
  */
 static inline void __release_state_machine_lock(struct port *port)
 {
-       spin_unlock_bh(&(SLAVE_AD_INFO(port->slave).state_machine_lock));
+       spin_unlock_bh(&(SLAVE_AD_INFO(port->slave)->state_machine_lock));
 }
 
 /**
@@ -350,7 +350,7 @@ static u8 __get_duplex(struct port *port)
 static inline void __initialize_port_locks(struct slave *slave)
 {
        /* make sure it isn't called twice */
-       spin_lock_init(&(SLAVE_AD_INFO(slave).state_machine_lock));
+       spin_lock_init(&(SLAVE_AD_INFO(slave)->state_machine_lock));
 }
 
 /* Conversions */
@@ -688,8 +688,8 @@ static struct aggregator *__get_active_agg(struct aggregator *aggregator)
        struct slave *slave;
 
        bond_for_each_slave_rcu(bond, slave, iter)
-               if (SLAVE_AD_INFO(slave).aggregator.is_active)
-                       return &(SLAVE_AD_INFO(slave).aggregator);
+               if (SLAVE_AD_INFO(slave)->aggregator.is_active)
+                       return &(SLAVE_AD_INFO(slave)->aggregator);
 
        return NULL;
 }
@@ -1293,7 +1293,7 @@ static void ad_port_selection_logic(struct port *port)
        }
        /* search on all aggregators for a suitable aggregator for this port */
        bond_for_each_slave(bond, slave, iter) {
-               aggregator = &(SLAVE_AD_INFO(slave).aggregator);
+               aggregator = &(SLAVE_AD_INFO(slave)->aggregator);
 
                /* keep a free aggregator for later use(if needed) */
                if (!aggregator->lag_ports) {
@@ -1504,7 +1504,7 @@ static void ad_agg_selection_logic(struct aggregator *agg)
        best = (active && agg_device_up(active)) ? active : NULL;
 
        bond_for_each_slave_rcu(bond, slave, iter) {
-               agg = &(SLAVE_AD_INFO(slave).aggregator);
+               agg = &(SLAVE_AD_INFO(slave)->aggregator);
 
                agg->is_active = 0;
 
@@ -1549,7 +1549,7 @@ static void ad_agg_selection_logic(struct aggregator *agg)
                         best->slave ? best->slave->dev->name : "NULL");
 
                bond_for_each_slave_rcu(bond, slave, iter) {
-                       agg = &(SLAVE_AD_INFO(slave).aggregator);
+                       agg = &(SLAVE_AD_INFO(slave)->aggregator);
 
                        pr_debug("Agg=%d; P=%d; a k=%d; p k=%d; Ind=%d; Act=%d\n",
                                 agg->aggregator_identifier, agg->num_of_ports,
@@ -1840,16 +1840,16 @@ void bond_3ad_bind_slave(struct slave *slave)
        struct aggregator *aggregator;
 
        /* check that the slave has not been initialized yet. */
-       if (SLAVE_AD_INFO(slave).port.slave != slave) {
+       if (SLAVE_AD_INFO(slave)->port.slave != slave) {
 
                /* port initialization */
-               port = &(SLAVE_AD_INFO(slave).port);
+               port = &(SLAVE_AD_INFO(slave)->port);
 
                ad_initialize_port(port, bond->params.lacp_fast);
 
                __initialize_port_locks(slave);
                port->slave = slave;
-               port->actor_port_number = SLAVE_AD_INFO(slave).id;
+               port->actor_port_number = SLAVE_AD_INFO(slave)->id;
                /* key is determined according to the link speed, duplex and user key(which
                 * is yet not supported)
                 */
@@ -1874,7 +1874,7 @@ void bond_3ad_bind_slave(struct slave *slave)
                __disable_port(port);
 
                /* aggregator initialization */
-               aggregator = &(SLAVE_AD_INFO(slave).aggregator);
+               aggregator = &(SLAVE_AD_INFO(slave)->aggregator);
 
                ad_initialize_agg(aggregator);
 
@@ -1903,8 +1903,8 @@ void bond_3ad_unbind_slave(struct slave *slave)
        struct slave *slave_iter;
        struct list_head *iter;
 
-       aggregator = &(SLAVE_AD_INFO(slave).aggregator);
-       port = &(SLAVE_AD_INFO(slave).port);
+       aggregator = &(SLAVE_AD_INFO(slave)->aggregator);
+       port = &(SLAVE_AD_INFO(slave)->port);
 
        /* if slave is null, the whole port is not initialized */
        if (!port->slave) {
@@ -1932,7 +1932,7 @@ void bond_3ad_unbind_slave(struct slave *slave)
                    (aggregator->lag_ports->next_port_in_aggregator)) {
                        /* find new aggregator for the related port(s) */
                        bond_for_each_slave(bond, slave_iter, iter) {
-                               new_aggregator = &(SLAVE_AD_INFO(slave_iter).aggregator);
+                               new_aggregator = &(SLAVE_AD_INFO(slave_iter)->aggregator);
                                /* if the new aggregator is empty, or it is
                                 * connected to our port only
                                 */
@@ -2010,7 +2010,7 @@ void bond_3ad_unbind_slave(struct slave *slave)
 
        /* find the aggregator that this port is connected to */
        bond_for_each_slave(bond, slave_iter, iter) {
-               temp_aggregator = &(SLAVE_AD_INFO(slave_iter).aggregator);
+               temp_aggregator = &(SLAVE_AD_INFO(slave_iter)->aggregator);
                prev_port = NULL;
                /* search the port in the aggregator's related ports */
                for (temp_port = temp_aggregator->lag_ports; temp_port;
@@ -2076,7 +2076,7 @@ void bond_3ad_state_machine_handler(struct work_struct *work)
        if (BOND_AD_INFO(bond).agg_select_timer &&
            !(--BOND_AD_INFO(bond).agg_select_timer)) {
                slave = bond_first_slave_rcu(bond);
-               port = slave ? &(SLAVE_AD_INFO(slave).port) : NULL;
+               port = slave ? &(SLAVE_AD_INFO(slave)->port) : NULL;
 
                /* select the active aggregator for the bond */
                if (port) {
@@ -2094,7 +2094,7 @@ void bond_3ad_state_machine_handler(struct work_struct *work)
 
        /* for each port run the state machines */
        bond_for_each_slave_rcu(bond, slave, iter) {
-               port = &(SLAVE_AD_INFO(slave).port);
+               port = &(SLAVE_AD_INFO(slave)->port);
                if (!port->slave) {
                        pr_warn_ratelimited("%s: Warning: Found an uninitialized port\n",
                                            bond->dev->name);
@@ -2155,7 +2155,7 @@ static int bond_3ad_rx_indication(struct lacpdu *lacpdu, struct slave *slave,
 
        if (length >= sizeof(struct lacpdu)) {
 
-               port = &(SLAVE_AD_INFO(slave).port);
+               port = &(SLAVE_AD_INFO(slave)->port);
 
                if (!port->slave) {
                        pr_warn_ratelimited("%s: Warning: port of slave %s is uninitialized\n",
@@ -2212,7 +2212,7 @@ void bond_3ad_adapter_speed_changed(struct slave *slave)
 {
        struct port *port;
 
-       port = &(SLAVE_AD_INFO(slave).port);
+       port = &(SLAVE_AD_INFO(slave)->port);
 
        /* if slave is null, the whole port is not initialized */
        if (!port->slave) {
@@ -2245,7 +2245,7 @@ void bond_3ad_adapter_duplex_changed(struct slave *slave)
 {
        struct port *port;
 
-       port = &(SLAVE_AD_INFO(slave).port);
+       port = &(SLAVE_AD_INFO(slave)->port);
 
        /* if slave is null, the whole port is not initialized */
        if (!port->slave) {
@@ -2279,7 +2279,7 @@ void bond_3ad_handle_link_change(struct slave *slave, char link)
 {
        struct port *port;
 
-       port = &(SLAVE_AD_INFO(slave).port);
+       port = &(SLAVE_AD_INFO(slave)->port);
 
        /* if slave is null, the whole port is not initialized */
        if (!port->slave) {
@@ -2347,7 +2347,7 @@ int bond_3ad_set_carrier(struct bonding *bond)
                ret = 0;
                goto out;
        }
-       active = __get_active_agg(&(SLAVE_AD_INFO(first_slave).aggregator));
+       active = __get_active_agg(&(SLAVE_AD_INFO(first_slave)->aggregator));
        if (active) {
                /* are enough slaves available to consider link up? */
                if (active->num_of_ports < bond->params.min_links) {
@@ -2384,7 +2384,7 @@ int __bond_3ad_get_active_agg_info(struct bonding *bond,
        struct port *port;
 
        bond_for_each_slave_rcu(bond, slave, iter) {
-               port = &(SLAVE_AD_INFO(slave).port);
+               port = &(SLAVE_AD_INFO(slave)->port);
                if (port->aggregator && port->aggregator->is_active) {
                        aggregator = port->aggregator;
                        break;
@@ -2440,22 +2440,22 @@ int bond_3ad_xmit_xor(struct sk_buff *skb, struct net_device *dev)
                goto err_free;
        }
 
-       slave_agg_no = bond_xmit_hash(bond, skb, slaves_in_agg);
+       slave_agg_no = bond_xmit_hash(bond, skb) % slaves_in_agg;
        first_ok_slave = NULL;
 
        bond_for_each_slave_rcu(bond, slave, iter) {
-               agg = SLAVE_AD_INFO(slave).port.aggregator;
+               agg = SLAVE_AD_INFO(slave)->port.aggregator;
                if (!agg || agg->aggregator_identifier != agg_id)
                        continue;
 
                if (slave_agg_no >= 0) {
-                       if (!first_ok_slave && SLAVE_IS_OK(slave))
+                       if (!first_ok_slave && bond_slave_can_tx(slave))
                                first_ok_slave = slave;
                        slave_agg_no--;
                        continue;
                }
 
-               if (SLAVE_IS_OK(slave)) {
+               if (bond_slave_can_tx(slave)) {
                        bond_dev_queue_xmit(bond, skb, slave->dev);
                        goto out;
                }
@@ -2522,7 +2522,7 @@ void bond_3ad_update_lacp_rate(struct bonding *bond)
 
        lacp_fast = bond->params.lacp_fast;
        bond_for_each_slave(bond, slave, iter) {
-               port = &(SLAVE_AD_INFO(slave).port);
+               port = &(SLAVE_AD_INFO(slave)->port);
                __get_state_machine_lock(port);
                if (lacp_fast)
                        port->actor_oper_port_state |= AD_STATE_LACP_TIMEOUT;
index 9f69e818b0009db7881b3f8c862393836e5a604b..03e0bcade234f1e62cfe8587b5d6ac79672ee00f 100644 (file)
@@ -228,7 +228,7 @@ static struct slave *tlb_get_least_loaded_slave(struct bonding *bond)
 
        /* Find the slave with the largest gap */
        bond_for_each_slave_rcu(bond, slave, iter) {
-               if (SLAVE_IS_OK(slave)) {
+               if (bond_slave_can_tx(slave)) {
                        long long gap = compute_gap(slave);
 
                        if (max_gap < gap) {
@@ -383,7 +383,7 @@ static struct slave *rlb_next_rx_slave(struct bonding *bond)
        bool found = false;
 
        bond_for_each_slave(bond, slave, iter) {
-               if (!SLAVE_IS_OK(slave))
+               if (!bond_slave_can_tx(slave))
                        continue;
                if (!found) {
                        if (!before || before->speed < slave->speed)
@@ -416,7 +416,7 @@ static struct slave *__rlb_next_rx_slave(struct bonding *bond)
        bool found = false;
 
        bond_for_each_slave_rcu(bond, slave, iter) {
-               if (!SLAVE_IS_OK(slave))
+               if (!bond_slave_can_tx(slave))
                        continue;
                if (!found) {
                        if (!before || before->speed < slave->speed)
@@ -1057,7 +1057,7 @@ static int alb_set_slave_mac_addr(struct slave *slave, u8 addr[])
        struct net_device *dev = slave->dev;
        struct sockaddr s_addr;
 
-       if (slave->bond->params.mode == BOND_MODE_TLB) {
+       if (BOND_MODE(slave->bond) == BOND_MODE_TLB) {
                memcpy(dev->dev_addr, addr, dev->addr_len);
                return 0;
        }
@@ -1100,13 +1100,13 @@ static void alb_swap_mac_addr(struct slave *slave1, struct slave *slave2)
 static void alb_fasten_mac_swap(struct bonding *bond, struct slave *slave1,
                                struct slave *slave2)
 {
-       int slaves_state_differ = (SLAVE_IS_OK(slave1) != SLAVE_IS_OK(slave2));
+       int slaves_state_differ = (bond_slave_can_tx(slave1) != bond_slave_can_tx(slave2));
        struct slave *disabled_slave = NULL;
 
        ASSERT_RTNL();
 
        /* fasten the change in the switch */
-       if (SLAVE_IS_OK(slave1)) {
+       if (bond_slave_can_tx(slave1)) {
                alb_send_learning_packets(slave1, slave1->dev->dev_addr);
                if (bond->alb_info.rlb_enabled) {
                        /* inform the clients that the mac address
@@ -1118,7 +1118,7 @@ static void alb_fasten_mac_swap(struct bonding *bond, struct slave *slave1,
                disabled_slave = slave1;
        }
 
-       if (SLAVE_IS_OK(slave2)) {
+       if (bond_slave_can_tx(slave2)) {
                alb_send_learning_packets(slave2, slave2->dev->dev_addr);
                if (bond->alb_info.rlb_enabled) {
                        /* inform the clients that the mac address
@@ -1347,6 +1347,77 @@ void bond_alb_deinitialize(struct bonding *bond)
                rlb_deinitialize(bond);
 }
 
+static int bond_do_alb_xmit(struct sk_buff *skb, struct bonding *bond,
+               struct slave *tx_slave)
+{
+       struct alb_bond_info *bond_info = &(BOND_ALB_INFO(bond));
+       struct ethhdr *eth_data = eth_hdr(skb);
+
+       if (!tx_slave) {
+               /* unbalanced or unassigned, send through primary */
+               tx_slave = rcu_dereference(bond->curr_active_slave);
+               if (bond->params.tlb_dynamic_lb)
+                       bond_info->unbalanced_load += skb->len;
+       }
+
+       if (tx_slave && bond_slave_can_tx(tx_slave)) {
+               if (tx_slave != rcu_dereference(bond->curr_active_slave)) {
+                       ether_addr_copy(eth_data->h_source,
+                                       tx_slave->dev->dev_addr);
+               }
+
+               bond_dev_queue_xmit(bond, skb, tx_slave->dev);
+               goto out;
+       }
+
+       if (tx_slave && bond->params.tlb_dynamic_lb) {
+               _lock_tx_hashtbl(bond);
+               __tlb_clear_slave(bond, tx_slave, 0);
+               _unlock_tx_hashtbl(bond);
+       }
+
+       /* no suitable interface, frame not sent */
+       dev_kfree_skb_any(skb);
+out:
+       return NETDEV_TX_OK;
+}
+
+int bond_tlb_xmit(struct sk_buff *skb, struct net_device *bond_dev)
+{
+       struct bonding *bond = netdev_priv(bond_dev);
+       struct ethhdr *eth_data;
+       struct slave *tx_slave = NULL;
+       u32 hash_index;
+
+       skb_reset_mac_header(skb);
+       eth_data = eth_hdr(skb);
+
+       /* Do not TX balance any multicast or broadcast */
+       if (!is_multicast_ether_addr(eth_data->h_dest)) {
+               switch (skb->protocol) {
+               case htons(ETH_P_IP):
+               case htons(ETH_P_IPX):
+                   /* In case of IPX, it will falback to L2 hash */
+               case htons(ETH_P_IPV6):
+                       hash_index = bond_xmit_hash(bond, skb);
+                       if (bond->params.tlb_dynamic_lb) {
+                               tx_slave = tlb_choose_channel(bond,
+                                                             hash_index & 0xFF,
+                                                             skb->len);
+                       } else {
+                               struct list_head *iter;
+                               int idx = hash_index % bond->slave_cnt;
+
+                               bond_for_each_slave_rcu(bond, tx_slave, iter)
+                                       if (--idx < 0)
+                                               break;
+                       }
+                       break;
+               }
+       }
+       return bond_do_alb_xmit(skb, bond, tx_slave);
+}
+
 int bond_alb_xmit(struct sk_buff *skb, struct net_device *bond_dev)
 {
        struct bonding *bond = netdev_priv(bond_dev);
@@ -1355,7 +1426,7 @@ int bond_alb_xmit(struct sk_buff *skb, struct net_device *bond_dev)
        struct slave *tx_slave = NULL;
        static const __be32 ip_bcast = htonl(0xffffffff);
        int hash_size = 0;
-       int do_tx_balance = 1;
+       bool do_tx_balance = true;
        u32 hash_index = 0;
        const u8 *hash_start = NULL;
        struct ipv6hdr *ip6hdr;
@@ -1370,7 +1441,7 @@ int bond_alb_xmit(struct sk_buff *skb, struct net_device *bond_dev)
                if (ether_addr_equal_64bits(eth_data->h_dest, mac_bcast) ||
                    (iph->daddr == ip_bcast) ||
                    (iph->protocol == IPPROTO_IGMP)) {
-                       do_tx_balance = 0;
+                       do_tx_balance = false;
                        break;
                }
                hash_start = (char *)&(iph->daddr);
@@ -1382,7 +1453,7 @@ int bond_alb_xmit(struct sk_buff *skb, struct net_device *bond_dev)
                 * that here just in case.
                 */
                if (ether_addr_equal_64bits(eth_data->h_dest, mac_bcast)) {
-                       do_tx_balance = 0;
+                       do_tx_balance = false;
                        break;
                }
 
@@ -1390,7 +1461,7 @@ int bond_alb_xmit(struct sk_buff *skb, struct net_device *bond_dev)
                 * broadcasts in IPv4.
                 */
                if (ether_addr_equal_64bits(eth_data->h_dest, mac_v6_allmcast)) {
-                       do_tx_balance = 0;
+                       do_tx_balance = false;
                        break;
                }
 
@@ -1400,7 +1471,7 @@ int bond_alb_xmit(struct sk_buff *skb, struct net_device *bond_dev)
                 */
                ip6hdr = ipv6_hdr(skb);
                if (ipv6_addr_any(&ip6hdr->saddr)) {
-                       do_tx_balance = 0;
+                       do_tx_balance = false;
                        break;
                }
 
@@ -1410,7 +1481,7 @@ int bond_alb_xmit(struct sk_buff *skb, struct net_device *bond_dev)
        case ETH_P_IPX:
                if (ipx_hdr(skb)->ipx_checksum != IPX_NO_CHECKSUM) {
                        /* something is wrong with this packet */
-                       do_tx_balance = 0;
+                       do_tx_balance = false;
                        break;
                }
 
@@ -1419,7 +1490,7 @@ int bond_alb_xmit(struct sk_buff *skb, struct net_device *bond_dev)
                         * this family since it has an "ARP" like
                         * mechanism
                         */
-                       do_tx_balance = 0;
+                       do_tx_balance = false;
                        break;
                }
 
@@ -1427,12 +1498,12 @@ int bond_alb_xmit(struct sk_buff *skb, struct net_device *bond_dev)
                hash_size = ETH_ALEN;
                break;
        case ETH_P_ARP:
-               do_tx_balance = 0;
+               do_tx_balance = false;
                if (bond_info->rlb_enabled)
                        tx_slave = rlb_arp_xmit(skb, bond);
                break;
        default:
-               do_tx_balance = 0;
+               do_tx_balance = false;
                break;
        }
 
@@ -1441,32 +1512,7 @@ int bond_alb_xmit(struct sk_buff *skb, struct net_device *bond_dev)
                tx_slave = tlb_choose_channel(bond, hash_index, skb->len);
        }
 
-       if (!tx_slave) {
-               /* unbalanced or unassigned, send through primary */
-               tx_slave = rcu_dereference(bond->curr_active_slave);
-               bond_info->unbalanced_load += skb->len;
-       }
-
-       if (tx_slave && SLAVE_IS_OK(tx_slave)) {
-               if (tx_slave != rcu_dereference(bond->curr_active_slave)) {
-                       ether_addr_copy(eth_data->h_source,
-                                       tx_slave->dev->dev_addr);
-               }
-
-               bond_dev_queue_xmit(bond, skb, tx_slave->dev);
-               goto out;
-       }
-
-       if (tx_slave) {
-               _lock_tx_hashtbl(bond);
-               __tlb_clear_slave(bond, tx_slave, 0);
-               _unlock_tx_hashtbl(bond);
-       }
-
-       /* no suitable interface, frame not sent */
-       dev_kfree_skb_any(skb);
-out:
-       return NETDEV_TX_OK;
+       return bond_do_alb_xmit(skb, bond, tx_slave);
 }
 
 void bond_alb_monitor(struct work_struct *work)
@@ -1699,7 +1745,7 @@ void bond_alb_handle_active_change(struct bonding *bond, struct slave *new_slave
        /* in TLB mode, the slave might flip down/up with the old dev_addr,
         * and thus filter bond->dev_addr's packets, so force bond's mac
         */
-       if (bond->params.mode == BOND_MODE_TLB) {
+       if (BOND_MODE(bond) == BOND_MODE_TLB) {
                struct sockaddr sa;
                u8 tmp_addr[ETH_ALEN];
 
index e09dd4bfafffcf585b8f853f7661e2e416c58602..5fc76c01636cb6eb0e9e96d14fc0c79566741900 100644 (file)
@@ -175,6 +175,7 @@ void bond_alb_deinit_slave(struct bonding *bond, struct slave *slave);
 void bond_alb_handle_link_change(struct bonding *bond, struct slave *slave, char link);
 void bond_alb_handle_active_change(struct bonding *bond, struct slave *new_slave);
 int bond_alb_xmit(struct sk_buff *skb, struct net_device *bond_dev);
+int bond_tlb_xmit(struct sk_buff *skb, struct net_device *bond_dev);
 void bond_alb_monitor(struct work_struct *);
 int bond_alb_set_mac_address(struct net_device *bond_dev, void *addr);
 void bond_alb_clear_vlan(struct bonding *bond, unsigned short vlan_id);
index 2d3f7fa541ffe755fc1bf5f9e51aeaa464b4e032..658e761c4568dff39ef18db8c548f8ed349c8ee0 100644 (file)
@@ -23,7 +23,7 @@ static int bond_debug_rlb_hash_show(struct seq_file *m, void *v)
        struct rlb_client_info *client_info;
        u32 hash_index;
 
-       if (bond->params.mode != BOND_MODE_ALB)
+       if (BOND_MODE(bond) != BOND_MODE_ALB)
                return 0;
 
        seq_printf(m, "SourceIP        DestinationIP   "
index 69aff72c895716fe6c579d2bf7f46c79ddca2a36..499645b0925c84109d913f25c20b1eb57bd214a9 100644 (file)
@@ -343,7 +343,7 @@ static int bond_set_carrier(struct bonding *bond)
        if (!bond_has_slaves(bond))
                goto down;
 
-       if (bond->params.mode == BOND_MODE_8023AD)
+       if (BOND_MODE(bond) == BOND_MODE_8023AD)
                return bond_3ad_set_carrier(bond);
 
        bond_for_each_slave(bond, slave, iter) {
@@ -497,7 +497,7 @@ static int bond_set_promiscuity(struct bonding *bond, int inc)
        struct list_head *iter;
        int err = 0;
 
-       if (USES_PRIMARY(bond->params.mode)) {
+       if (bond_uses_primary(bond)) {
                /* write lock already acquired */
                if (bond->curr_active_slave) {
                        err = dev_set_promiscuity(bond->curr_active_slave->dev,
@@ -523,7 +523,7 @@ static int bond_set_allmulti(struct bonding *bond, int inc)
        struct list_head *iter;
        int err = 0;
 
-       if (USES_PRIMARY(bond->params.mode)) {
+       if (bond_uses_primary(bond)) {
                /* write lock already acquired */
                if (bond->curr_active_slave) {
                        err = dev_set_allmulti(bond->curr_active_slave->dev,
@@ -574,7 +574,7 @@ static void bond_hw_addr_flush(struct net_device *bond_dev,
        dev_uc_unsync(slave_dev, bond_dev);
        dev_mc_unsync(slave_dev, bond_dev);
 
-       if (bond->params.mode == BOND_MODE_8023AD) {
+       if (BOND_MODE(bond) == BOND_MODE_8023AD) {
                /* del lacpdu mc addr from mc list */
                u8 lacpdu_multicast[ETH_ALEN] = MULTICAST_LACPDU_ADDR;
 
@@ -585,8 +585,8 @@ static void bond_hw_addr_flush(struct net_device *bond_dev,
 /*--------------------------- Active slave change ---------------------------*/
 
 /* Update the hardware address list and promisc/allmulti for the new and
- * old active slaves (if any).  Modes that are !USES_PRIMARY keep all
- * slaves up date at all times; only the USES_PRIMARY modes need to call
+ * old active slaves (if any).  Modes that are not using primary keep all
+ * slaves up date at all times; only the modes that use primary need to call
  * this function to swap these settings during a failover.
  */
 static void bond_hw_addr_swap(struct bonding *bond, struct slave *new_active,
@@ -747,7 +747,7 @@ static struct slave *bond_find_best_slave(struct bonding *bond)
        bond_for_each_slave(bond, slave, iter) {
                if (slave->link == BOND_LINK_UP)
                        return slave;
-               if (slave->link == BOND_LINK_BACK && IS_UP(slave->dev) &&
+               if (slave->link == BOND_LINK_BACK && bond_slave_is_up(slave) &&
                    slave->delay < mintime) {
                        mintime = slave->delay;
                        bestslave = slave;
@@ -801,7 +801,7 @@ void bond_change_active_slave(struct bonding *bond, struct slave *new_active)
                new_active->last_link_up = jiffies;
 
                if (new_active->link == BOND_LINK_BACK) {
-                       if (USES_PRIMARY(bond->params.mode)) {
+                       if (bond_uses_primary(bond)) {
                                pr_info("%s: making interface %s the new active one %d ms earlier\n",
                                        bond->dev->name, new_active->dev->name,
                                        (bond->params.updelay - new_active->delay) * bond->params.miimon);
@@ -810,20 +810,20 @@ void bond_change_active_slave(struct bonding *bond, struct slave *new_active)
                        new_active->delay = 0;
                        new_active->link = BOND_LINK_UP;
 
-                       if (bond->params.mode == BOND_MODE_8023AD)
+                       if (BOND_MODE(bond) == BOND_MODE_8023AD)
                                bond_3ad_handle_link_change(new_active, BOND_LINK_UP);
 
                        if (bond_is_lb(bond))
                                bond_alb_handle_link_change(bond, new_active, BOND_LINK_UP);
                } else {
-                       if (USES_PRIMARY(bond->params.mode)) {
+                       if (bond_uses_primary(bond)) {
                                pr_info("%s: making interface %s the new active one\n",
                                        bond->dev->name, new_active->dev->name);
                        }
                }
        }
 
-       if (USES_PRIMARY(bond->params.mode))
+       if (bond_uses_primary(bond))
                bond_hw_addr_swap(bond, new_active, old_active);
 
        if (bond_is_lb(bond)) {
@@ -838,7 +838,7 @@ void bond_change_active_slave(struct bonding *bond, struct slave *new_active)
                rcu_assign_pointer(bond->curr_active_slave, new_active);
        }
 
-       if (bond->params.mode == BOND_MODE_ACTIVEBACKUP) {
+       if (BOND_MODE(bond) == BOND_MODE_ACTIVEBACKUP) {
                if (old_active)
                        bond_set_slave_inactive_flags(old_active,
                                                      BOND_SLAVE_NOTIFY_NOW);
@@ -876,8 +876,8 @@ void bond_change_active_slave(struct bonding *bond, struct slave *new_active)
         * resend only if bond is brought up with the affected
         * bonding modes and the retransmission is enabled */
        if (netif_running(bond->dev) && (bond->params.resend_igmp > 0) &&
-           ((USES_PRIMARY(bond->params.mode) && new_active) ||
-            bond->params.mode == BOND_MODE_ROUNDROBIN)) {
+           ((bond_uses_primary(bond) && new_active) ||
+            BOND_MODE(bond) == BOND_MODE_ROUNDROBIN)) {
                bond->igmp_retrans = bond->params.resend_igmp;
                queue_delayed_work(bond->wq, &bond->mcast_work, 1);
        }
@@ -958,7 +958,7 @@ static void bond_netpoll_cleanup(struct net_device *bond_dev)
        struct slave *slave;
 
        bond_for_each_slave(bond, slave, iter)
-               if (IS_UP(slave->dev))
+               if (bond_slave_is_up(slave))
                        slave_disable_netpoll(slave);
 }
 
@@ -1038,6 +1038,7 @@ static void bond_compute_features(struct bonding *bond)
 
        if (!bond_has_slaves(bond))
                goto done;
+       vlan_features &= NETIF_F_ALL_FOR_ALL;
 
        bond_for_each_slave(bond, slave, iter) {
                vlan_features = netdev_increment_features(vlan_features,
@@ -1084,7 +1085,7 @@ static bool bond_should_deliver_exact_match(struct sk_buff *skb,
                                            struct bonding *bond)
 {
        if (bond_is_slave_inactive(slave)) {
-               if (bond->params.mode == BOND_MODE_ALB &&
+               if (BOND_MODE(bond) == BOND_MODE_ALB &&
                    skb->pkt_type != PACKET_BROADCAST &&
                    skb->pkt_type != PACKET_MULTICAST)
                        return false;
@@ -1126,7 +1127,7 @@ static rx_handler_result_t bond_handle_frame(struct sk_buff **pskb)
 
        skb->dev = bond->dev;
 
-       if (bond->params.mode == BOND_MODE_ALB &&
+       if (BOND_MODE(bond) == BOND_MODE_ALB &&
            bond->dev->priv_flags & IFF_BRIDGE_PORT &&
            skb->pkt_type == PACKET_HOST) {
 
@@ -1163,6 +1164,35 @@ static void bond_upper_dev_unlink(struct net_device *bond_dev,
        rtmsg_ifinfo(RTM_NEWLINK, slave_dev, IFF_SLAVE, GFP_KERNEL);
 }
 
+static struct slave *bond_alloc_slave(struct bonding *bond)
+{
+       struct slave *slave = NULL;
+
+       slave = kzalloc(sizeof(struct slave), GFP_KERNEL);
+       if (!slave)
+               return NULL;
+
+       if (BOND_MODE(bond) == BOND_MODE_8023AD) {
+               SLAVE_AD_INFO(slave) = kzalloc(sizeof(struct ad_slave_info),
+                                              GFP_KERNEL);
+               if (!SLAVE_AD_INFO(slave)) {
+                       kfree(slave);
+                       return NULL;
+               }
+       }
+       return slave;
+}
+
+static void bond_free_slave(struct slave *slave)
+{
+       struct bonding *bond = bond_get_bond_by_slave(slave);
+
+       if (BOND_MODE(bond) == BOND_MODE_8023AD)
+               kfree(SLAVE_AD_INFO(slave));
+
+       kfree(slave);
+}
+
 /* enslave device <slave> to bond device <master> */
 int bond_enslave(struct net_device *bond_dev, struct net_device *slave_dev)
 {
@@ -1269,7 +1299,7 @@ int bond_enslave(struct net_device *bond_dev, struct net_device *slave_dev)
                if (!bond_has_slaves(bond)) {
                        pr_warn("%s: Warning: The first slave device specified does not support setting the MAC address\n",
                                bond_dev->name);
-                       if (bond->params.mode == BOND_MODE_ACTIVEBACKUP) {
+                       if (BOND_MODE(bond) == BOND_MODE_ACTIVEBACKUP) {
                                bond->params.fail_over_mac = BOND_FOM_ACTIVE;
                                pr_warn("%s: Setting fail_over_mac to active for active-backup mode\n",
                                        bond_dev->name);
@@ -1290,11 +1320,14 @@ int bond_enslave(struct net_device *bond_dev, struct net_device *slave_dev)
            bond->dev->addr_assign_type == NET_ADDR_RANDOM)
                bond_set_dev_addr(bond->dev, slave_dev);
 
-       new_slave = kzalloc(sizeof(struct slave), GFP_KERNEL);
+       new_slave = bond_alloc_slave(bond);
        if (!new_slave) {
                res = -ENOMEM;
                goto err_undo_flags;
        }
+
+       new_slave->bond = bond;
+       new_slave->dev = slave_dev;
        /*
         * Set the new_slave's queue_id to be zero.  Queue ID mapping
         * is set via sysfs or module option if desired.
@@ -1317,7 +1350,7 @@ int bond_enslave(struct net_device *bond_dev, struct net_device *slave_dev)
        ether_addr_copy(new_slave->perm_hwaddr, slave_dev->dev_addr);
 
        if (!bond->params.fail_over_mac ||
-           bond->params.mode != BOND_MODE_ACTIVEBACKUP) {
+           BOND_MODE(bond) != BOND_MODE_ACTIVEBACKUP) {
                /*
                 * Set slave to master's mac address.  The application already
                 * set the master's mac address to that of the first slave
@@ -1338,8 +1371,6 @@ int bond_enslave(struct net_device *bond_dev, struct net_device *slave_dev)
                goto err_restore_mac;
        }
 
-       new_slave->bond = bond;
-       new_slave->dev = slave_dev;
        slave_dev->priv_flags |= IFF_BONDING;
 
        if (bond_is_lb(bond)) {
@@ -1351,10 +1382,10 @@ int bond_enslave(struct net_device *bond_dev, struct net_device *slave_dev)
                        goto err_close;
        }
 
-       /* If the mode USES_PRIMARY, then the following is handled by
+       /* If the mode uses primary, then the following is handled by
         * bond_change_active_slave().
         */
-       if (!USES_PRIMARY(bond->params.mode)) {
+       if (!bond_uses_primary(bond)) {
                /* set promiscuity level to new slave */
                if (bond_dev->flags & IFF_PROMISC) {
                        res = dev_set_promiscuity(slave_dev, 1);
@@ -1377,7 +1408,7 @@ int bond_enslave(struct net_device *bond_dev, struct net_device *slave_dev)
                netif_addr_unlock_bh(bond_dev);
        }
 
-       if (bond->params.mode == BOND_MODE_8023AD) {
+       if (BOND_MODE(bond) == BOND_MODE_8023AD) {
                /* add lacpdu mc addr to mc list */
                u8 lacpdu_multicast[ETH_ALEN] = MULTICAST_LACPDU_ADDR;
 
@@ -1450,7 +1481,7 @@ int bond_enslave(struct net_device *bond_dev, struct net_device *slave_dev)
                 new_slave->link == BOND_LINK_DOWN ? "DOWN" :
                 (new_slave->link == BOND_LINK_UP ? "UP" : "BACK"));
 
-       if (USES_PRIMARY(bond->params.mode) && bond->params.primary[0]) {
+       if (bond_uses_primary(bond) && bond->params.primary[0]) {
                /* if there is a primary slave, remember it */
                if (strcmp(bond->params.primary, new_slave->dev->name) == 0) {
                        bond->primary_slave = new_slave;
@@ -1458,7 +1489,7 @@ int bond_enslave(struct net_device *bond_dev, struct net_device *slave_dev)
                }
        }
 
-       switch (bond->params.mode) {
+       switch (BOND_MODE(bond)) {
        case BOND_MODE_ACTIVEBACKUP:
                bond_set_slave_inactive_flags(new_slave,
                                              BOND_SLAVE_NOTIFY_NOW);
@@ -1471,14 +1502,14 @@ int bond_enslave(struct net_device *bond_dev, struct net_device *slave_dev)
                bond_set_slave_inactive_flags(new_slave, BOND_SLAVE_NOTIFY_NOW);
                /* if this is the first slave */
                if (!prev_slave) {
-                       SLAVE_AD_INFO(new_slave).id = 1;
+                       SLAVE_AD_INFO(new_slave)->id = 1;
                        /* Initialize AD with the number of times that the AD timer is called in 1 second
                         * can be called only after the mac address of the bond is set
                         */
                        bond_3ad_initialize(bond, 1000/AD_TIMER_INTERVAL);
                } else {
-                       SLAVE_AD_INFO(new_slave).id =
-                               SLAVE_AD_INFO(prev_slave).id + 1;
+                       SLAVE_AD_INFO(new_slave)->id =
+                               SLAVE_AD_INFO(prev_slave)->id + 1;
                }
 
                bond_3ad_bind_slave(new_slave);
@@ -1539,7 +1570,7 @@ int bond_enslave(struct net_device *bond_dev, struct net_device *slave_dev)
        bond_compute_features(bond);
        bond_set_carrier(bond);
 
-       if (USES_PRIMARY(bond->params.mode)) {
+       if (bond_uses_primary(bond)) {
                block_netpoll_tx();
                write_lock_bh(&bond->curr_slave_lock);
                bond_select_active_slave(bond);
@@ -1563,7 +1594,7 @@ err_unregister:
        netdev_rx_handler_unregister(slave_dev);
 
 err_detach:
-       if (!USES_PRIMARY(bond->params.mode))
+       if (!bond_uses_primary(bond))
                bond_hw_addr_flush(bond_dev, slave_dev);
 
        vlan_vids_del_by_dev(slave_dev, bond_dev);
@@ -1585,7 +1616,7 @@ err_close:
 
 err_restore_mac:
        if (!bond->params.fail_over_mac ||
-           bond->params.mode != BOND_MODE_ACTIVEBACKUP) {
+           BOND_MODE(bond) != BOND_MODE_ACTIVEBACKUP) {
                /* XXX TODO - fom follow mode needs to change master's
                 * MAC if this slave's MAC is in use by the bond, or at
                 * least print a warning.
@@ -1599,7 +1630,7 @@ err_restore_mtu:
        dev_set_mtu(slave_dev, new_slave->original_mtu);
 
 err_free:
-       kfree(new_slave);
+       bond_free_slave(new_slave);
 
 err_undo_flags:
        /* Enslave of first slave has failed and we need to fix master's mac */
@@ -1661,7 +1692,7 @@ static int __bond_release_one(struct net_device *bond_dev,
        write_lock_bh(&bond->lock);
 
        /* Inform AD package of unbinding of slave. */
-       if (bond->params.mode == BOND_MODE_8023AD)
+       if (BOND_MODE(bond) == BOND_MODE_8023AD)
                bond_3ad_unbind_slave(slave);
 
        write_unlock_bh(&bond->lock);
@@ -1676,7 +1707,7 @@ static int __bond_release_one(struct net_device *bond_dev,
        bond->current_arp_slave = NULL;
 
        if (!all && (!bond->params.fail_over_mac ||
-                    bond->params.mode != BOND_MODE_ACTIVEBACKUP)) {
+                    BOND_MODE(bond) != BOND_MODE_ACTIVEBACKUP)) {
                if (ether_addr_equal_64bits(bond_dev->dev_addr, slave->perm_hwaddr) &&
                    bond_has_slaves(bond))
                        pr_warn("%s: Warning: the permanent HWaddr of %s - %pM - is still in use by %s - set the HWaddr of %s to a different address to avoid conflicts\n",
@@ -1748,10 +1779,10 @@ static int __bond_release_one(struct net_device *bond_dev,
        /* must do this from outside any spinlocks */
        vlan_vids_del_by_dev(slave_dev, bond_dev);
 
-       /* If the mode USES_PRIMARY, then this cases was handled above by
+       /* If the mode uses primary, then this cases was handled above by
         * bond_change_active_slave(..., NULL)
         */
-       if (!USES_PRIMARY(bond->params.mode)) {
+       if (!bond_uses_primary(bond)) {
                /* unset promiscuity level from slave
                 * NOTE: The NETDEV_CHANGEADDR call above may change the value
                 * of the IFF_PROMISC flag in the bond_dev, but we need the
@@ -1775,7 +1806,7 @@ static int __bond_release_one(struct net_device *bond_dev,
        dev_close(slave_dev);
 
        if (bond->params.fail_over_mac != BOND_FOM_ACTIVE ||
-           bond->params.mode != BOND_MODE_ACTIVEBACKUP) {
+           BOND_MODE(bond) != BOND_MODE_ACTIVEBACKUP) {
                /* restore original ("permanent") mac address */
                ether_addr_copy(addr.sa_data, slave->perm_hwaddr);
                addr.sa_family = slave_dev->type;
@@ -1786,7 +1817,7 @@ static int __bond_release_one(struct net_device *bond_dev,
 
        slave_dev->priv_flags &= ~IFF_BONDING;
 
-       kfree(slave);
+       bond_free_slave(slave);
 
        return 0;  /* deletion OK */
 }
@@ -1821,7 +1852,7 @@ static int bond_info_query(struct net_device *bond_dev, struct ifbond *info)
 {
        struct bonding *bond = netdev_priv(bond_dev);
 
-       info->bond_mode = bond->params.mode;
+       info->bond_mode = BOND_MODE(bond);
        info->miimon = bond->params.miimon;
 
        info->num_slaves = bond->slave_cnt;
@@ -1877,7 +1908,7 @@ static int bond_miimon_inspect(struct bonding *bond)
                        if (slave->delay) {
                                pr_info("%s: link status down for %sinterface %s, disabling it in %d ms\n",
                                        bond->dev->name,
-                                       (bond->params.mode ==
+                                       (BOND_MODE(bond) ==
                                         BOND_MODE_ACTIVEBACKUP) ?
                                        (bond_is_active_slave(slave) ?
                                         "active " : "backup ") : "",
@@ -1968,10 +1999,10 @@ static void bond_miimon_commit(struct bonding *bond)
                        slave->link = BOND_LINK_UP;
                        slave->last_link_up = jiffies;
 
-                       if (bond->params.mode == BOND_MODE_8023AD) {
+                       if (BOND_MODE(bond) == BOND_MODE_8023AD) {
                                /* prevent it from being the active one */
                                bond_set_backup_slave(slave);
-                       } else if (bond->params.mode != BOND_MODE_ACTIVEBACKUP) {
+                       } else if (BOND_MODE(bond) != BOND_MODE_ACTIVEBACKUP) {
                                /* make it immediately active */
                                bond_set_active_slave(slave);
                        } else if (slave != bond->primary_slave) {
@@ -1985,7 +2016,7 @@ static void bond_miimon_commit(struct bonding *bond)
                                slave->duplex ? "full" : "half");
 
                        /* notify ad that the link status has changed */
-                       if (bond->params.mode == BOND_MODE_8023AD)
+                       if (BOND_MODE(bond) == BOND_MODE_8023AD)
                                bond_3ad_handle_link_change(slave, BOND_LINK_UP);
 
                        if (bond_is_lb(bond))
@@ -2004,15 +2035,15 @@ static void bond_miimon_commit(struct bonding *bond)
 
                        slave->link = BOND_LINK_DOWN;
 
-                       if (bond->params.mode == BOND_MODE_ACTIVEBACKUP ||
-                           bond->params.mode == BOND_MODE_8023AD)
+                       if (BOND_MODE(bond) == BOND_MODE_ACTIVEBACKUP ||
+                           BOND_MODE(bond) == BOND_MODE_8023AD)
                                bond_set_slave_inactive_flags(slave,
                                                              BOND_SLAVE_NOTIFY_NOW);
 
                        pr_info("%s: link status definitely down for interface %s, disabling it\n",
                                bond->dev->name, slave->dev->name);
 
-                       if (bond->params.mode == BOND_MODE_8023AD)
+                       if (BOND_MODE(bond) == BOND_MODE_8023AD)
                                bond_3ad_handle_link_change(slave,
                                                            BOND_LINK_DOWN);
 
@@ -2291,8 +2322,8 @@ int bond_arp_rcv(const struct sk_buff *skb, struct bonding *bond,
        int alen, is_arp = skb->protocol == __cpu_to_be16(ETH_P_ARP);
 
        if (!slave_do_arp_validate(bond, slave)) {
-               if ((slave_do_arp_validate_only(bond, slave) && is_arp) ||
-                   !slave_do_arp_validate_only(bond, slave))
+               if ((slave_do_arp_validate_only(bond) && is_arp) ||
+                   !slave_do_arp_validate_only(bond))
                        slave->last_rx = jiffies;
                return RX_HANDLER_ANOTHER;
        } else if (!is_arp) {
@@ -2460,7 +2491,7 @@ static void bond_loadbalance_arp_mon(struct work_struct *work)
                 * do - all replies will be rx'ed on same link causing slaves
                 * to be unstable during low/no traffic periods
                 */
-               if (IS_UP(slave->dev))
+               if (bond_slave_is_up(slave))
                        bond_arp_send_all(bond, slave);
        }
 
@@ -2682,10 +2713,10 @@ static bool bond_ab_arp_probe(struct bonding *bond)
        bond_set_slave_inactive_flags(curr_arp_slave, BOND_SLAVE_NOTIFY_LATER);
 
        bond_for_each_slave_rcu(bond, slave, iter) {
-               if (!found && !before && IS_UP(slave->dev))
+               if (!found && !before && bond_slave_is_up(slave))
                        before = slave;
 
-               if (found && !new_slave && IS_UP(slave->dev))
+               if (found && !new_slave && bond_slave_is_up(slave))
                        new_slave = slave;
                /* if the link state is up at this point, we
                 * mark it down - this can happen if we have
@@ -2694,7 +2725,7 @@ static bool bond_ab_arp_probe(struct bonding *bond)
                 * one the current slave so it is still marked
                 * up when it is actually down
                 */
-               if (!IS_UP(slave->dev) && slave->link == BOND_LINK_UP) {
+               if (!bond_slave_is_up(slave) && slave->link == BOND_LINK_UP) {
                        slave->link = BOND_LINK_DOWN;
                        if (slave->link_failure_count < UINT_MAX)
                                slave->link_failure_count++;
@@ -2857,7 +2888,7 @@ static int bond_slave_netdev_event(unsigned long event,
 
                bond_update_speed_duplex(slave);
 
-               if (bond->params.mode == BOND_MODE_8023AD) {
+               if (BOND_MODE(bond) == BOND_MODE_8023AD) {
                        if (old_speed != slave->speed)
                                bond_3ad_adapter_speed_changed(slave);
                        if (old_duplex != slave->duplex)
@@ -2885,7 +2916,7 @@ static int bond_slave_netdev_event(unsigned long event,
                break;
        case NETDEV_CHANGENAME:
                /* we don't care if we don't have primary set */
-               if (!USES_PRIMARY(bond->params.mode) ||
+               if (!bond_uses_primary(bond) ||
                    !bond->params.primary[0])
                        break;
 
@@ -3015,20 +3046,18 @@ static bool bond_flow_dissect(struct bonding *bond, struct sk_buff *skb,
  * bond_xmit_hash - generate a hash value based on the xmit policy
  * @bond: bonding device
  * @skb: buffer to use for headers
- * @count: modulo value
  *
  * This function will extract the necessary headers from the skb buffer and use
  * them to generate a hash based on the xmit_policy set in the bonding device
- * which will be reduced modulo count before returning.
  */
-int bond_xmit_hash(struct bonding *bond, struct sk_buff *skb, int count)
+u32 bond_xmit_hash(struct bonding *bond, struct sk_buff *skb)
 {
        struct flow_keys flow;
        u32 hash;
 
        if (bond->params.xmit_policy == BOND_XMIT_POLICY_LAYER2 ||
            !bond_flow_dissect(bond, skb, &flow))
-               return bond_eth_hash(skb) % count;
+               return bond_eth_hash(skb);
 
        if (bond->params.xmit_policy == BOND_XMIT_POLICY_LAYER23 ||
            bond->params.xmit_policy == BOND_XMIT_POLICY_ENCAP23)
@@ -3039,7 +3068,7 @@ int bond_xmit_hash(struct bonding *bond, struct sk_buff *skb, int count)
        hash ^= (hash >> 16);
        hash ^= (hash >> 8);
 
-       return hash % count;
+       return hash;
 }
 
 /*-------------------------- Device entry points ----------------------------*/
@@ -3050,7 +3079,7 @@ static void bond_work_init_all(struct bonding *bond)
                          bond_resend_igmp_join_requests_delayed);
        INIT_DELAYED_WORK(&bond->alb_work, bond_alb_monitor);
        INIT_DELAYED_WORK(&bond->mii_work, bond_mii_monitor);
-       if (bond->params.mode == BOND_MODE_ACTIVEBACKUP)
+       if (BOND_MODE(bond) == BOND_MODE_ACTIVEBACKUP)
                INIT_DELAYED_WORK(&bond->arp_work, bond_activebackup_arp_mon);
        else
                INIT_DELAYED_WORK(&bond->arp_work, bond_loadbalance_arp_mon);
@@ -3077,7 +3106,7 @@ static int bond_open(struct net_device *bond_dev)
        if (bond_has_slaves(bond)) {
                read_lock(&bond->curr_slave_lock);
                bond_for_each_slave(bond, slave, iter) {
-                       if (USES_PRIMARY(bond->params.mode)
+                       if (bond_uses_primary(bond)
                                && (slave != bond->curr_active_slave)) {
                                bond_set_slave_inactive_flags(slave,
                                                              BOND_SLAVE_NOTIFY_NOW);
@@ -3096,9 +3125,10 @@ static int bond_open(struct net_device *bond_dev)
                /* bond_alb_initialize must be called before the timer
                 * is started.
                 */
-               if (bond_alb_initialize(bond, (bond->params.mode == BOND_MODE_ALB)))
+               if (bond_alb_initialize(bond, (BOND_MODE(bond) == BOND_MODE_ALB)))
                        return -ENOMEM;
-               queue_delayed_work(bond->wq, &bond->alb_work, 0);
+               if (bond->params.tlb_dynamic_lb)
+                       queue_delayed_work(bond->wq, &bond->alb_work, 0);
        }
 
        if (bond->params.miimon)  /* link check interval, in milliseconds. */
@@ -3109,7 +3139,7 @@ static int bond_open(struct net_device *bond_dev)
                bond->recv_probe = bond_arp_rcv;
        }
 
-       if (bond->params.mode == BOND_MODE_8023AD) {
+       if (BOND_MODE(bond) == BOND_MODE_8023AD) {
                queue_delayed_work(bond->wq, &bond->ad_work, 0);
                /* register to receive LACPDUs */
                bond->recv_probe = bond_3ad_lacpdu_recv;
@@ -3314,7 +3344,7 @@ static void bond_set_rx_mode(struct net_device *bond_dev)
 
 
        rcu_read_lock();
-       if (USES_PRIMARY(bond->params.mode)) {
+       if (bond_uses_primary(bond)) {
                slave = rcu_dereference(bond->curr_active_slave);
                if (slave) {
                        dev_uc_sync(slave->dev, bond_dev);
@@ -3468,7 +3498,7 @@ static int bond_set_mac_address(struct net_device *bond_dev, void *addr)
        struct list_head *iter;
        int res = 0;
 
-       if (bond->params.mode == BOND_MODE_ALB)
+       if (BOND_MODE(bond) == BOND_MODE_ALB)
                return bond_alb_set_mac_address(bond_dev, addr);
 
 
@@ -3479,7 +3509,7 @@ static int bond_set_mac_address(struct net_device *bond_dev, void *addr)
         * Returning an error causes ifenslave to fail.
         */
        if (bond->params.fail_over_mac &&
-           bond->params.mode == BOND_MODE_ACTIVEBACKUP)
+           BOND_MODE(bond) == BOND_MODE_ACTIVEBACKUP)
                return 0;
 
        if (!is_valid_ether_addr(sa->sa_data))
@@ -3559,7 +3589,7 @@ static void bond_xmit_slave_id(struct bonding *bond, struct sk_buff *skb, int sl
        /* Here we start from the slave with slave_id */
        bond_for_each_slave_rcu(bond, slave, iter) {
                if (--i < 0) {
-                       if (slave_can_tx(slave)) {
+                       if (bond_slave_can_tx(slave)) {
                                bond_dev_queue_xmit(bond, skb, slave->dev);
                                return;
                        }
@@ -3571,7 +3601,7 @@ static void bond_xmit_slave_id(struct bonding *bond, struct sk_buff *skb, int sl
        bond_for_each_slave_rcu(bond, slave, iter) {
                if (--i < 0)
                        break;
-               if (slave_can_tx(slave)) {
+               if (bond_slave_can_tx(slave)) {
                        bond_dev_queue_xmit(bond, skb, slave->dev);
                        return;
                }
@@ -3628,7 +3658,7 @@ static int bond_xmit_roundrobin(struct sk_buff *skb, struct net_device *bond_dev
         */
        if (iph->protocol == IPPROTO_IGMP && skb->protocol == htons(ETH_P_IP)) {
                slave = rcu_dereference(bond->curr_active_slave);
-               if (slave && slave_can_tx(slave))
+               if (slave && bond_slave_can_tx(slave))
                        bond_dev_queue_xmit(bond, skb, slave->dev);
                else
                        bond_xmit_slave_id(bond, skb, 0);
@@ -3666,7 +3696,7 @@ static int bond_xmit_xor(struct sk_buff *skb, struct net_device *bond_dev)
 {
        struct bonding *bond = netdev_priv(bond_dev);
 
-       bond_xmit_slave_id(bond, skb, bond_xmit_hash(bond, skb, bond->slave_cnt));
+       bond_xmit_slave_id(bond, skb, bond_xmit_hash(bond, skb) % bond->slave_cnt);
 
        return NETDEV_TX_OK;
 }
@@ -3681,7 +3711,7 @@ static int bond_xmit_broadcast(struct sk_buff *skb, struct net_device *bond_dev)
        bond_for_each_slave_rcu(bond, slave, iter) {
                if (bond_is_last_slave(bond, slave))
                        break;
-               if (IS_UP(slave->dev) && slave->link == BOND_LINK_UP) {
+               if (bond_slave_is_up(slave) && slave->link == BOND_LINK_UP) {
                        struct sk_buff *skb2 = skb_clone(skb, GFP_ATOMIC);
 
                        if (!skb2) {
@@ -3693,7 +3723,7 @@ static int bond_xmit_broadcast(struct sk_buff *skb, struct net_device *bond_dev)
                        bond_dev_queue_xmit(bond, skb2, slave->dev);
                }
        }
-       if (slave && IS_UP(slave->dev) && slave->link == BOND_LINK_UP)
+       if (slave && bond_slave_is_up(slave) && slave->link == BOND_LINK_UP)
                bond_dev_queue_xmit(bond, skb, slave->dev);
        else
                dev_kfree_skb_any(skb);
@@ -3718,7 +3748,7 @@ static inline int bond_slave_override(struct bonding *bond,
        /* Find out if any slaves have the same mapping as this skb. */
        bond_for_each_slave_rcu(bond, slave, iter) {
                if (slave->queue_id == skb->queue_mapping) {
-                       if (slave_can_tx(slave)) {
+                       if (bond_slave_can_tx(slave)) {
                                bond_dev_queue_xmit(bond, skb, slave->dev);
                                return 0;
                        }
@@ -3759,12 +3789,11 @@ static netdev_tx_t __bond_start_xmit(struct sk_buff *skb, struct net_device *dev
 {
        struct bonding *bond = netdev_priv(dev);
 
-       if (TX_QUEUE_OVERRIDE(bond->params.mode)) {
-               if (!bond_slave_override(bond, skb))
-                       return NETDEV_TX_OK;
-       }
+       if (bond_should_override_tx_queue(bond) &&
+           !bond_slave_override(bond, skb))
+               return NETDEV_TX_OK;
 
-       switch (bond->params.mode) {
+       switch (BOND_MODE(bond)) {
        case BOND_MODE_ROUNDROBIN:
                return bond_xmit_roundrobin(skb, dev);
        case BOND_MODE_ACTIVEBACKUP:
@@ -3776,12 +3805,13 @@ static netdev_tx_t __bond_start_xmit(struct sk_buff *skb, struct net_device *dev
        case BOND_MODE_8023AD:
                return bond_3ad_xmit_xor(skb, dev);
        case BOND_MODE_ALB:
-       case BOND_MODE_TLB:
                return bond_alb_xmit(skb, dev);
+       case BOND_MODE_TLB:
+               return bond_tlb_xmit(skb, dev);
        default:
                /* Should never happen, mode already checked */
                pr_err("%s: Error: Unknown bonding mode %d\n",
-                      dev->name, bond->params.mode);
+                      dev->name, BOND_MODE(bond));
                WARN_ON_ONCE(1);
                dev_kfree_skb_any(skb);
                return NETDEV_TX_OK;
@@ -3821,14 +3851,14 @@ static int bond_ethtool_get_settings(struct net_device *bond_dev,
        ecmd->duplex = DUPLEX_UNKNOWN;
        ecmd->port = PORT_OTHER;
 
-       /* Since SLAVE_IS_OK returns false for all inactive or down slaves, we
+       /* Since bond_slave_can_tx returns false for all inactive or down slaves, we
         * do not need to check mode.  Though link speed might not represent
         * the true receive or transmit bandwidth (not all modes are symmetric)
         * this is an accurate maximum.
         */
        read_lock(&bond->lock);
        bond_for_each_slave(bond, slave, iter) {
-               if (SLAVE_IS_OK(slave)) {
+               if (bond_slave_can_tx(slave)) {
                        if (slave->speed != SPEED_UNKNOWN)
                                speed += slave->speed;
                        if (ecmd->duplex == DUPLEX_UNKNOWN &&
@@ -3998,7 +4028,8 @@ static int bond_check_params(struct bond_params *params)
 
        if (xmit_hash_policy) {
                if ((bond_mode != BOND_MODE_XOR) &&
-                   (bond_mode != BOND_MODE_8023AD)) {
+                   (bond_mode != BOND_MODE_8023AD) &&
+                   (bond_mode != BOND_MODE_TLB)) {
                        pr_info("xmit_hash_policy param is irrelevant in mode %s\n",
                                bond_mode_name(bond_mode));
                } else {
@@ -4083,7 +4114,7 @@ static int bond_check_params(struct bond_params *params)
        }
 
        /* reset values for 802.3ad/TLB/ALB */
-       if (BOND_NO_USES_ARP(bond_mode)) {
+       if (!bond_mode_uses_arp(bond_mode)) {
                if (!miimon) {
                        pr_warn("Warning: miimon must be specified, otherwise bonding will not detect link failure, speed and duplex which are essential for 802.3ad operation\n");
                        pr_warn("Forcing miimon to 100msec\n");
@@ -4165,7 +4196,7 @@ static int bond_check_params(struct bond_params *params)
                   catch mistakes */
                __be32 ip;
                if (!in4_pton(arp_ip_target[i], -1, (u8 *)&ip, -1, NULL) ||
-                   IS_IP_TARGET_UNUSABLE_ADDRESS(ip)) {
+                   !bond_is_ip_target_ok(ip)) {
                        pr_warn("Warning: bad arp_ip_target module parameter (%s), ARP monitoring will not be performed\n",
                                arp_ip_target[i]);
                        arp_interval = 0;
@@ -4238,7 +4269,7 @@ static int bond_check_params(struct bond_params *params)
                pr_debug("Warning: either miimon or arp_interval and arp_ip_target module parameters must be specified, otherwise bonding will not detect link failures! see bonding.txt for details\n");
        }
 
-       if (primary && !USES_PRIMARY(bond_mode)) {
+       if (primary && !bond_mode_uses_primary(bond_mode)) {
                /* currently, using a primary only makes sense
                 * in active backup, TLB or ALB modes
                 */
@@ -4304,6 +4335,7 @@ static int bond_check_params(struct bond_params *params)
        params->min_links = min_links;
        params->lp_interval = lp_interval;
        params->packets_per_slave = packets_per_slave;
+       params->tlb_dynamic_lb = 1; /* Default value */
        if (packets_per_slave > 0) {
                params->reciprocal_packets_per_slave =
                        reciprocal_value(packets_per_slave);
index f847e165d252fb2a4528fe396b2c4f0553e81ac3..5ab3c1847e6760e2f3ef7d2ec35085c2d4bf655b 100644 (file)
@@ -56,10 +56,10 @@ static int bond_fill_slave_info(struct sk_buff *skb,
        if (nla_put_u16(skb, IFLA_BOND_SLAVE_QUEUE_ID, slave->queue_id))
                goto nla_put_failure;
 
-       if (slave->bond->params.mode == BOND_MODE_8023AD) {
+       if (BOND_MODE(slave->bond) == BOND_MODE_8023AD) {
                const struct aggregator *agg;
 
-               agg = SLAVE_AD_INFO(slave).port.aggregator;
+               agg = SLAVE_AD_INFO(slave)->port.aggregator;
                if (agg)
                        if (nla_put_u16(skb, IFLA_BOND_SLAVE_AD_AGGREGATOR_ID,
                                        agg->aggregator_identifier))
@@ -407,7 +407,7 @@ static int bond_fill_info(struct sk_buff *skb,
        unsigned int packets_per_slave;
        int i, targets_added;
 
-       if (nla_put_u8(skb, IFLA_BOND_MODE, bond->params.mode))
+       if (nla_put_u8(skb, IFLA_BOND_MODE, BOND_MODE(bond)))
                goto nla_put_failure;
 
        if (slave_dev &&
@@ -505,7 +505,7 @@ static int bond_fill_info(struct sk_buff *skb,
                       bond->params.ad_select))
                goto nla_put_failure;
 
-       if (bond->params.mode == BOND_MODE_8023AD) {
+       if (BOND_MODE(bond) == BOND_MODE_8023AD) {
                struct ad_info info;
 
                if (!bond_3ad_get_active_agg_info(bond, &info)) {
index 724e30fa20b9fa70166b5d9b25ed9029fab6db73..94094b3d5a3eb7250143c0e4a29a71e185d4fac9 100644 (file)
@@ -70,6 +70,8 @@ static int bond_option_mode_set(struct bonding *bond,
                                const struct bond_opt_value *newval);
 static int bond_option_slaves_set(struct bonding *bond,
                                  const struct bond_opt_value *newval);
+static int bond_option_tlb_dynamic_lb_set(struct bonding *bond,
+                                 const struct bond_opt_value *newval);
 
 
 static const struct bond_opt_value bond_mode_tbl[] = {
@@ -179,6 +181,12 @@ static const struct bond_opt_value bond_lp_interval_tbl[] = {
        { NULL,      -1,      0},
 };
 
+static const struct bond_opt_value bond_tlb_dynamic_lb_tbl[] = {
+       { "off", 0,  0},
+       { "on",  1,  BOND_VALFLAG_DEFAULT},
+       { NULL,  -1, 0}
+};
+
 static const struct bond_option bond_opts[] = {
        [BOND_OPT_MODE] = {
                .id = BOND_OPT_MODE,
@@ -199,7 +207,7 @@ static const struct bond_option bond_opts[] = {
        [BOND_OPT_XMIT_HASH] = {
                .id = BOND_OPT_XMIT_HASH,
                .name = "xmit_hash_policy",
-               .desc = "balance-xor and 802.3ad hashing method",
+               .desc = "balance-xor, 802.3ad, and tlb hashing method",
                .values = bond_xmit_hashtype_tbl,
                .set = bond_option_xmit_hash_policy_set
        },
@@ -364,9 +372,33 @@ static const struct bond_option bond_opts[] = {
                .flags = BOND_OPTFLAG_RAWVAL,
                .set = bond_option_slaves_set
        },
+       [BOND_OPT_TLB_DYNAMIC_LB] = {
+               .id = BOND_OPT_TLB_DYNAMIC_LB,
+               .name = "tlb_dynamic_lb",
+               .desc = "Enable dynamic flow shuffling",
+               .unsuppmodes = BOND_MODE_ALL_EX(BIT(BOND_MODE_TLB)),
+               .values = bond_tlb_dynamic_lb_tbl,
+               .flags = BOND_OPTFLAG_IFDOWN,
+               .set = bond_option_tlb_dynamic_lb_set,
+       },
        { }
 };
 
+/* Searches for an option by name */
+const struct bond_option *bond_opt_get_by_name(const char *name)
+{
+       const struct bond_option *opt;
+       int option;
+
+       for (option = 0; option < BOND_OPT_LAST; option++) {
+               opt = bond_opt_get(option);
+               if (opt && !strcmp(opt->name, name))
+                       return opt;
+       }
+
+       return NULL;
+}
+
 /* Searches for a value in opt's values[] table */
 const struct bond_opt_value *bond_opt_get_val(unsigned int option, u64 val)
 {
@@ -640,7 +672,7 @@ const struct bond_option *bond_opt_get(unsigned int option)
 
 int bond_option_mode_set(struct bonding *bond, const struct bond_opt_value *newval)
 {
-       if (BOND_NO_USES_ARP(newval->value) && bond->params.arp_interval) {
+       if (!bond_mode_uses_arp(newval->value) && bond->params.arp_interval) {
                pr_info("%s: %s mode is incompatible with arp monitoring, start mii monitoring\n",
                        bond->dev->name, newval->string);
                /* disable arp monitoring */
@@ -661,7 +693,7 @@ int bond_option_mode_set(struct bonding *bond, const struct bond_opt_value *newv
 static struct net_device *__bond_option_active_slave_get(struct bonding *bond,
                                                         struct slave *slave)
 {
-       return USES_PRIMARY(bond->params.mode) && slave ? slave->dev : NULL;
+       return bond_uses_primary(bond) && slave ? slave->dev : NULL;
 }
 
 struct net_device *bond_option_active_slave_get_rcu(struct bonding *bond)
@@ -726,7 +758,7 @@ static int bond_option_active_slave_set(struct bonding *bond,
                                bond->dev->name, new_active->dev->name);
                } else {
                        if (old_active && (new_active->link == BOND_LINK_UP) &&
-                           IS_UP(new_active->dev)) {
+                           bond_slave_is_up(new_active)) {
                                pr_info("%s: Setting %s as active slave\n",
                                        bond->dev->name, new_active->dev->name);
                                bond_change_active_slave(bond, new_active);
@@ -745,6 +777,10 @@ static int bond_option_active_slave_set(struct bonding *bond,
        return ret;
 }
 
+/* There are two tricky bits here.  First, if MII monitoring is activated, then
+ * we must disable ARP monitoring.  Second, if the timer isn't running, we must
+ * start it.
+ */
 static int bond_option_miimon_set(struct bonding *bond,
                                  const struct bond_opt_value *newval)
 {
@@ -783,6 +819,10 @@ static int bond_option_miimon_set(struct bonding *bond,
        return 0;
 }
 
+/* Set up and down delays. These must be multiples of the
+ * MII monitoring value, and are stored internally as the multiplier.
+ * Thus, we must translate to MS for the real world.
+ */
 static int bond_option_updelay_set(struct bonding *bond,
                                   const struct bond_opt_value *newval)
 {
@@ -841,6 +881,10 @@ static int bond_option_use_carrier_set(struct bonding *bond,
        return 0;
 }
 
+/* There are two tricky bits here.  First, if ARP monitoring is activated, then
+ * we must disable MII monitoring.  Second, if the ARP timer isn't running,
+ * we must start it.
+ */
 static int bond_option_arp_interval_set(struct bonding *bond,
                                        const struct bond_opt_value *newval)
 {
@@ -898,7 +942,7 @@ static int _bond_option_arp_ip_target_add(struct bonding *bond, __be32 target)
        __be32 *targets = bond->params.arp_targets;
        int ind;
 
-       if (IS_IP_TARGET_UNUSABLE_ADDRESS(target)) {
+       if (!bond_is_ip_target_ok(target)) {
                pr_err("%s: invalid ARP target %pI4 specified for addition\n",
                       bond->dev->name, &target);
                return -EINVAL;
@@ -943,7 +987,7 @@ static int bond_option_arp_ip_target_rem(struct bonding *bond, __be32 target)
        unsigned long *targets_rx;
        int ind, i;
 
-       if (IS_IP_TARGET_UNUSABLE_ADDRESS(target)) {
+       if (!bond_is_ip_target_ok(target)) {
                pr_err("%s: invalid ARP target %pI4 specified for removal\n",
                       bond->dev->name, &target);
                return -EINVAL;
@@ -1337,3 +1381,13 @@ err_no_cmd:
        ret = -EPERM;
        goto out;
 }
+
+static int bond_option_tlb_dynamic_lb_set(struct bonding *bond,
+                                         const struct bond_opt_value *newval)
+{
+       pr_info("%s: Setting dynamic-lb to %s (%llu)\n",
+               bond->dev->name, newval->string, newval->value);
+       bond->params.tlb_dynamic_lb = newval->value;
+
+       return 0;
+}
index 12be9e1bfb0c0d048229a1698c2b384847fbd794..17ded5b291761ca9e85e2fa6c82514a4613815b5 100644 (file)
@@ -62,6 +62,7 @@ enum {
        BOND_OPT_RESEND_IGMP,
        BOND_OPT_LP_INTERVAL,
        BOND_OPT_SLAVES,
+       BOND_OPT_TLB_DYNAMIC_LB,
        BOND_OPT_LAST
 };
 
@@ -104,6 +105,7 @@ int bond_opt_tryset_rtnl(struct bonding *bond, unsigned int option, char *buf);
 const struct bond_opt_value *bond_opt_parse(const struct bond_option *opt,
                                            struct bond_opt_value *val);
 const struct bond_option *bond_opt_get(unsigned int option);
+const struct bond_option *bond_opt_get_by_name(const char *name);
 const struct bond_opt_value *bond_opt_get_val(unsigned int option, u64 val);
 
 /* This helper is used to initialize a bond_opt_value structure for parameter
index 013fdd0f45e94340917ee2aeecc8529d386862e2..b215b479bb3a6917ffd6d37f30f989996d176f5b 100644 (file)
@@ -72,9 +72,9 @@ static void bond_info_show_master(struct seq_file *seq)
        curr = rcu_dereference(bond->curr_active_slave);
 
        seq_printf(seq, "Bonding Mode: %s",
-                  bond_mode_name(bond->params.mode));
+                  bond_mode_name(BOND_MODE(bond)));
 
-       if (bond->params.mode == BOND_MODE_ACTIVEBACKUP &&
+       if (BOND_MODE(bond) == BOND_MODE_ACTIVEBACKUP &&
            bond->params.fail_over_mac) {
                optval = bond_opt_get_val(BOND_OPT_FAIL_OVER_MAC,
                                          bond->params.fail_over_mac);
@@ -83,15 +83,15 @@ static void bond_info_show_master(struct seq_file *seq)
 
        seq_printf(seq, "\n");
 
-       if (bond->params.mode == BOND_MODE_XOR ||
-               bond->params.mode == BOND_MODE_8023AD) {
+       if (BOND_MODE(bond) == BOND_MODE_XOR ||
+               BOND_MODE(bond) == BOND_MODE_8023AD) {
                optval = bond_opt_get_val(BOND_OPT_XMIT_HASH,
                                          bond->params.xmit_policy);
                seq_printf(seq, "Transmit Hash Policy: %s (%d)\n",
                           optval->string, bond->params.xmit_policy);
        }
 
-       if (USES_PRIMARY(bond->params.mode)) {
+       if (bond_uses_primary(bond)) {
                seq_printf(seq, "Primary Slave: %s",
                           (bond->primary_slave) ?
                           bond->primary_slave->dev->name : "None");
@@ -134,7 +134,7 @@ static void bond_info_show_master(struct seq_file *seq)
                seq_printf(seq, "\n");
        }
 
-       if (bond->params.mode == BOND_MODE_8023AD) {
+       if (BOND_MODE(bond) == BOND_MODE_8023AD) {
                struct ad_info ad_info;
 
                seq_puts(seq, "\n802.3ad info\n");
@@ -188,9 +188,9 @@ static void bond_info_show_slave(struct seq_file *seq,
 
        seq_printf(seq, "Permanent HW addr: %pM\n", slave->perm_hwaddr);
 
-       if (bond->params.mode == BOND_MODE_8023AD) {
+       if (BOND_MODE(bond) == BOND_MODE_8023AD) {
                const struct aggregator *agg
-                       = SLAVE_AD_INFO(slave).port.aggregator;
+                       = SLAVE_AD_INFO(slave)->port.aggregator;
 
                if (agg)
                        seq_printf(seq, "Aggregator ID: %d\n",
index 0e8b268da0a08f58c4443c6c36aa62bb9ef0f071..daed52f68ce1614ec94772ad1628f09417e04bfe 100644 (file)
@@ -45,8 +45,7 @@
 #define to_dev(obj)    container_of(obj, struct device, kobj)
 #define to_bond(cd)    ((struct bonding *)(netdev_priv(to_net_dev(cd))))
 
-/*
- * "show" function for the bond_masters attribute.
+/* "show" function for the bond_masters attribute.
  * The class parameter is ignored.
  */
 static ssize_t bonding_show_bonds(struct class *cls,
@@ -88,14 +87,12 @@ static struct net_device *bond_get_by_name(struct bond_net *bn, const char *ifna
        return NULL;
 }
 
-/*
- * "store" function for the bond_masters attribute.  This is what
+/* "store" function for the bond_masters attribute.  This is what
  * creates and deletes entire bonds.
  *
  * The class parameter is ignored.
  *
  */
-
 static ssize_t bonding_store_bonds(struct class *cls,
                                   struct class_attribute *attr,
                                   const char *buffer, size_t count)
@@ -158,9 +155,26 @@ static const struct class_attribute class_attr_bonding_masters = {
        .store = bonding_store_bonds,
 };
 
-/*
- * Show the slaves in the current bond.
- */
+/* Generic "store" method for bonding sysfs option setting */
+static ssize_t bonding_sysfs_store_option(struct device *d,
+                                         struct device_attribute *attr,
+                                         const char *buffer, size_t count)
+{
+       struct bonding *bond = to_bond(d);
+       const struct bond_option *opt;
+       int ret;
+
+       opt = bond_opt_get_by_name(attr->attr.name);
+       if (WARN_ON(!opt))
+               return -ENOENT;
+       ret = bond_opt_tryset_rtnl(bond, opt->id, (char *)buffer);
+       if (!ret)
+               ret = count;
+
+       return ret;
+}
+
+/* Show the slaves in the current bond. */
 static ssize_t bonding_show_slaves(struct device *d,
                                   struct device_attribute *attr, char *buf)
 {
@@ -190,62 +204,24 @@ static ssize_t bonding_show_slaves(struct device *d,
 
        return res;
 }
-
-/*
- * Set the slaves in the current bond.
- * This is supposed to be only thin wrapper for bond_enslave and bond_release.
- * All hard work should be done there.
- */
-static ssize_t bonding_store_slaves(struct device *d,
-                                   struct device_attribute *attr,
-                                   const char *buffer, size_t count)
-{
-       struct bonding *bond = to_bond(d);
-       int ret;
-
-       ret = bond_opt_tryset_rtnl(bond, BOND_OPT_SLAVES, (char *)buffer);
-       if (!ret)
-               ret = count;
-
-       return ret;
-}
 static DEVICE_ATTR(slaves, S_IRUGO | S_IWUSR, bonding_show_slaves,
-                  bonding_store_slaves);
+                  bonding_sysfs_store_option);
 
-/*
- * Show and set the bonding mode.  The bond interface must be down to
- * change the mode.
- */
+/* Show the bonding mode. */
 static ssize_t bonding_show_mode(struct device *d,
                                 struct device_attribute *attr, char *buf)
 {
        struct bonding *bond = to_bond(d);
        const struct bond_opt_value *val;
 
-       val = bond_opt_get_val(BOND_OPT_MODE, bond->params.mode);
+       val = bond_opt_get_val(BOND_OPT_MODE, BOND_MODE(bond));
 
-       return sprintf(buf, "%s %d\n", val->string, bond->params.mode);
-}
-
-static ssize_t bonding_store_mode(struct device *d,
-                                 struct device_attribute *attr,
-                                 const char *buf, size_t count)
-{
-       struct bonding *bond = to_bond(d);
-       int ret;
-
-       ret = bond_opt_tryset_rtnl(bond, BOND_OPT_MODE, (char *)buf);
-       if (!ret)
-               ret = count;
-
-       return ret;
+       return sprintf(buf, "%s %d\n", val->string, BOND_MODE(bond));
 }
 static DEVICE_ATTR(mode, S_IRUGO | S_IWUSR,
-                  bonding_show_mode, bonding_store_mode);
+                  bonding_show_mode, bonding_sysfs_store_option);
 
-/*
- * Show and set the bonding transmit hash method.
- */
+/* Show the bonding transmit hash method. */
 static ssize_t bonding_show_xmit_hash(struct device *d,
                                      struct device_attribute *attr,
                                      char *buf)
@@ -257,26 +233,10 @@ static ssize_t bonding_show_xmit_hash(struct device *d,
 
        return sprintf(buf, "%s %d\n", val->string, bond->params.xmit_policy);
 }
-
-static ssize_t bonding_store_xmit_hash(struct device *d,
-                                      struct device_attribute *attr,
-                                      const char *buf, size_t count)
-{
-       struct bonding *bond = to_bond(d);
-       int ret;
-
-       ret = bond_opt_tryset_rtnl(bond, BOND_OPT_XMIT_HASH, (char *)buf);
-       if (!ret)
-               ret = count;
-
-       return ret;
-}
 static DEVICE_ATTR(xmit_hash_policy, S_IRUGO | S_IWUSR,
-                  bonding_show_xmit_hash, bonding_store_xmit_hash);
+                  bonding_show_xmit_hash, bonding_sysfs_store_option);
 
-/*
- * Show and set arp_validate.
- */
+/* Show arp_validate. */
 static ssize_t bonding_show_arp_validate(struct device *d,
                                         struct device_attribute *attr,
                                         char *buf)
@@ -289,26 +249,10 @@ static ssize_t bonding_show_arp_validate(struct device *d,
 
        return sprintf(buf, "%s %d\n", val->string, bond->params.arp_validate);
 }
-
-static ssize_t bonding_store_arp_validate(struct device *d,
-                                         struct device_attribute *attr,
-                                         const char *buf, size_t count)
-{
-       struct bonding *bond = to_bond(d);
-       int ret;
-
-       ret = bond_opt_tryset_rtnl(bond, BOND_OPT_ARP_VALIDATE, (char *)buf);
-       if (!ret)
-               ret = count;
-
-       return ret;
-}
-
 static DEVICE_ATTR(arp_validate, S_IRUGO | S_IWUSR, bonding_show_arp_validate,
-                  bonding_store_arp_validate);
-/*
- * Show and set arp_all_targets.
- */
+                  bonding_sysfs_store_option);
+
+/* Show arp_all_targets. */
 static ssize_t bonding_show_arp_all_targets(struct device *d,
                                         struct device_attribute *attr,
                                         char *buf)
@@ -321,28 +265,10 @@ static ssize_t bonding_show_arp_all_targets(struct device *d,
        return sprintf(buf, "%s %d\n",
                       val->string, bond->params.arp_all_targets);
 }
-
-static ssize_t bonding_store_arp_all_targets(struct device *d,
-                                         struct device_attribute *attr,
-                                         const char *buf, size_t count)
-{
-       struct bonding *bond = to_bond(d);
-       int ret;
-
-       ret = bond_opt_tryset_rtnl(bond, BOND_OPT_ARP_ALL_TARGETS, (char *)buf);
-       if (!ret)
-               ret = count;
-
-       return ret;
-}
-
 static DEVICE_ATTR(arp_all_targets, S_IRUGO | S_IWUSR,
-                  bonding_show_arp_all_targets, bonding_store_arp_all_targets);
+                  bonding_show_arp_all_targets, bonding_sysfs_store_option);
 
-/*
- * Show and store fail_over_mac.  User only allowed to change the
- * value when there are no slaves.
- */
+/* Show fail_over_mac. */
 static ssize_t bonding_show_fail_over_mac(struct device *d,
                                          struct device_attribute *attr,
                                          char *buf)
@@ -355,30 +281,10 @@ static ssize_t bonding_show_fail_over_mac(struct device *d,
 
        return sprintf(buf, "%s %d\n", val->string, bond->params.fail_over_mac);
 }
-
-static ssize_t bonding_store_fail_over_mac(struct device *d,
-                                          struct device_attribute *attr,
-                                          const char *buf, size_t count)
-{
-       struct bonding *bond = to_bond(d);
-       int ret;
-
-       ret = bond_opt_tryset_rtnl(bond, BOND_OPT_FAIL_OVER_MAC, (char *)buf);
-       if (!ret)
-               ret = count;
-
-       return ret;
-}
-
 static DEVICE_ATTR(fail_over_mac, S_IRUGO | S_IWUSR,
-                  bonding_show_fail_over_mac, bonding_store_fail_over_mac);
+                  bonding_show_fail_over_mac, bonding_sysfs_store_option);
 
-/*
- * Show and set the arp timer interval.  There are two tricky bits
- * here.  First, if ARP monitoring is activated, then we must disable
- * MII monitoring.  Second, if the ARP timer isn't running, we must
- * start it.
- */
+/* Show the arp timer interval. */
 static ssize_t bonding_show_arp_interval(struct device *d,
                                         struct device_attribute *attr,
                                         char *buf)
@@ -387,26 +293,10 @@ static ssize_t bonding_show_arp_interval(struct device *d,
 
        return sprintf(buf, "%d\n", bond->params.arp_interval);
 }
-
-static ssize_t bonding_store_arp_interval(struct device *d,
-                                         struct device_attribute *attr,
-                                         const char *buf, size_t count)
-{
-       struct bonding *bond = to_bond(d);
-       int ret;
-
-       ret = bond_opt_tryset_rtnl(bond, BOND_OPT_ARP_INTERVAL, (char *)buf);
-       if (!ret)
-               ret = count;
-
-       return ret;
-}
 static DEVICE_ATTR(arp_interval, S_IRUGO | S_IWUSR,
-                  bonding_show_arp_interval, bonding_store_arp_interval);
+                  bonding_show_arp_interval, bonding_sysfs_store_option);
 
-/*
- * Show and set the arp targets.
- */
+/* Show the arp targets. */
 static ssize_t bonding_show_arp_targets(struct device *d,
                                        struct device_attribute *attr,
                                        char *buf)
@@ -424,27 +314,10 @@ static ssize_t bonding_show_arp_targets(struct device *d,
 
        return res;
 }
+static DEVICE_ATTR(arp_ip_target, S_IRUGO | S_IWUSR,
+                  bonding_show_arp_targets, bonding_sysfs_store_option);
 
-static ssize_t bonding_store_arp_targets(struct device *d,
-                                        struct device_attribute *attr,
-                                        const char *buf, size_t count)
-{
-       struct bonding *bond = to_bond(d);
-       int ret;
-
-       ret = bond_opt_tryset_rtnl(bond, BOND_OPT_ARP_TARGETS, (char *)buf);
-       if (!ret)
-               ret = count;
-
-       return ret;
-}
-static DEVICE_ATTR(arp_ip_target, S_IRUGO | S_IWUSR , bonding_show_arp_targets, bonding_store_arp_targets);
-
-/*
- * Show and set the up and down delays.  These must be multiples of the
- * MII monitoring value, and are stored internally as the multiplier.
- * Thus, we must translate to MS for the real world.
- */
+/* Show the up and down delays. */
 static ssize_t bonding_show_downdelay(struct device *d,
                                      struct device_attribute *attr,
                                      char *buf)
@@ -453,22 +326,8 @@ static ssize_t bonding_show_downdelay(struct device *d,
 
        return sprintf(buf, "%d\n", bond->params.downdelay * bond->params.miimon);
 }
-
-static ssize_t bonding_store_downdelay(struct device *d,
-                                      struct device_attribute *attr,
-                                      const char *buf, size_t count)
-{
-       struct bonding *bond = to_bond(d);
-       int ret;
-
-       ret = bond_opt_tryset_rtnl(bond, BOND_OPT_DOWNDELAY, (char *)buf);
-       if (!ret)
-               ret = count;
-
-       return ret;
-}
 static DEVICE_ATTR(downdelay, S_IRUGO | S_IWUSR,
-                  bonding_show_downdelay, bonding_store_downdelay);
+                  bonding_show_downdelay, bonding_sysfs_store_option);
 
 static ssize_t bonding_show_updelay(struct device *d,
                                    struct device_attribute *attr,
@@ -479,27 +338,10 @@ static ssize_t bonding_show_updelay(struct device *d,
        return sprintf(buf, "%d\n", bond->params.updelay * bond->params.miimon);
 
 }
-
-static ssize_t bonding_store_updelay(struct device *d,
-                                    struct device_attribute *attr,
-                                    const char *buf, size_t count)
-{
-       struct bonding *bond = to_bond(d);
-       int ret;
-
-       ret = bond_opt_tryset_rtnl(bond, BOND_OPT_UPDELAY, (char *)buf);
-       if (!ret)
-               ret = count;
-
-       return ret;
-}
 static DEVICE_ATTR(updelay, S_IRUGO | S_IWUSR,
-                  bonding_show_updelay, bonding_store_updelay);
+                  bonding_show_updelay, bonding_sysfs_store_option);
 
-/*
- * Show and set the LACP interval.  Interface must be down, and the mode
- * must be set to 802.3ad mode.
- */
+/* Show the LACP interval. */
 static ssize_t bonding_show_lacp(struct device *d,
                                 struct device_attribute *attr,
                                 char *buf)
@@ -511,22 +353,8 @@ static ssize_t bonding_show_lacp(struct device *d,
 
        return sprintf(buf, "%s %d\n", val->string, bond->params.lacp_fast);
 }
-
-static ssize_t bonding_store_lacp(struct device *d,
-                                 struct device_attribute *attr,
-                                 const char *buf, size_t count)
-{
-       struct bonding *bond = to_bond(d);
-       int ret;
-
-       ret = bond_opt_tryset_rtnl(bond, BOND_OPT_LACP_RATE, (char *)buf);
-       if (!ret)
-               ret = count;
-
-       return ret;
-}
 static DEVICE_ATTR(lacp_rate, S_IRUGO | S_IWUSR,
-                  bonding_show_lacp, bonding_store_lacp);
+                  bonding_show_lacp, bonding_sysfs_store_option);
 
 static ssize_t bonding_show_min_links(struct device *d,
                                      struct device_attribute *attr,
@@ -534,24 +362,10 @@ static ssize_t bonding_show_min_links(struct device *d,
 {
        struct bonding *bond = to_bond(d);
 
-       return sprintf(buf, "%d\n", bond->params.min_links);
-}
-
-static ssize_t bonding_store_min_links(struct device *d,
-                                      struct device_attribute *attr,
-                                      const char *buf, size_t count)
-{
-       struct bonding *bond = to_bond(d);
-       int ret;
-
-       ret = bond_opt_tryset_rtnl(bond, BOND_OPT_MINLINKS, (char *)buf);
-       if (!ret)
-               ret = count;
-
-       return ret;
+       return sprintf(buf, "%u\n", bond->params.min_links);
 }
 static DEVICE_ATTR(min_links, S_IRUGO | S_IWUSR,
-                  bonding_show_min_links, bonding_store_min_links);
+                  bonding_show_min_links, bonding_sysfs_store_option);
 
 static ssize_t bonding_show_ad_select(struct device *d,
                                      struct device_attribute *attr,
@@ -564,27 +378,10 @@ static ssize_t bonding_show_ad_select(struct device *d,
 
        return sprintf(buf, "%s %d\n", val->string, bond->params.ad_select);
 }
-
-
-static ssize_t bonding_store_ad_select(struct device *d,
-                                      struct device_attribute *attr,
-                                      const char *buf, size_t count)
-{
-       struct bonding *bond = to_bond(d);
-       int ret;
-
-       ret = bond_opt_tryset_rtnl(bond, BOND_OPT_AD_SELECT, (char *)buf);
-       if (!ret)
-               ret = count;
-
-       return ret;
-}
 static DEVICE_ATTR(ad_select, S_IRUGO | S_IWUSR,
-                  bonding_show_ad_select, bonding_store_ad_select);
+                  bonding_show_ad_select, bonding_sysfs_store_option);
 
-/*
- * Show and set the number of peer notifications to send after a failover event.
- */
+/* Show and set the number of peer notifications to send after a failover event. */
 static ssize_t bonding_show_num_peer_notif(struct device *d,
                                           struct device_attribute *attr,
                                           char *buf)
@@ -611,12 +408,7 @@ static DEVICE_ATTR(num_grat_arp, S_IRUGO | S_IWUSR,
 static DEVICE_ATTR(num_unsol_na, S_IRUGO | S_IWUSR,
                   bonding_show_num_peer_notif, bonding_store_num_peer_notif);
 
-/*
- * Show and set the MII monitor interval.  There are two tricky bits
- * here.  First, if MII monitoring is activated, then we must disable
- * ARP monitoring.  Second, if the timer isn't running, we must
- * start it.
- */
+/* Show the MII monitor interval. */
 static ssize_t bonding_show_miimon(struct device *d,
                                   struct device_attribute *attr,
                                   char *buf)
@@ -625,30 +417,10 @@ static ssize_t bonding_show_miimon(struct device *d,
 
        return sprintf(buf, "%d\n", bond->params.miimon);
 }
-
-static ssize_t bonding_store_miimon(struct device *d,
-                                   struct device_attribute *attr,
-                                   const char *buf, size_t count)
-{
-       struct bonding *bond = to_bond(d);
-       int ret;
-
-       ret = bond_opt_tryset_rtnl(bond, BOND_OPT_MIIMON, (char *)buf);
-       if (!ret)
-               ret = count;
-
-       return ret;
-}
 static DEVICE_ATTR(miimon, S_IRUGO | S_IWUSR,
-                  bonding_show_miimon, bonding_store_miimon);
+                  bonding_show_miimon, bonding_sysfs_store_option);
 
-/*
- * Show and set the primary slave.  The store function is much
- * simpler than bonding_store_slaves function because it only needs to
- * handle one interface name.
- * The bond must be a mode that supports a primary for this be
- * set.
- */
+/* Show the primary slave. */
 static ssize_t bonding_show_primary(struct device *d,
                                    struct device_attribute *attr,
                                    char *buf)
@@ -661,26 +433,10 @@ static ssize_t bonding_show_primary(struct device *d,
 
        return count;
 }
-
-static ssize_t bonding_store_primary(struct device *d,
-                                    struct device_attribute *attr,
-                                    const char *buf, size_t count)
-{
-       struct bonding *bond = to_bond(d);
-       int ret;
-
-       ret = bond_opt_tryset_rtnl(bond, BOND_OPT_PRIMARY, (char *)buf);
-       if (!ret)
-               ret = count;
-
-       return ret;
-}
 static DEVICE_ATTR(primary, S_IRUGO | S_IWUSR,
-                  bonding_show_primary, bonding_store_primary);
+                  bonding_show_primary, bonding_sysfs_store_option);
 
-/*
- * Show and set the primary_reselect flag.
- */
+/* Show the primary_reselect flag. */
 static ssize_t bonding_show_primary_reselect(struct device *d,
                                             struct device_attribute *attr,
                                             char *buf)
@@ -694,28 +450,10 @@ static ssize_t bonding_show_primary_reselect(struct device *d,
        return sprintf(buf, "%s %d\n",
                       val->string, bond->params.primary_reselect);
 }
-
-static ssize_t bonding_store_primary_reselect(struct device *d,
-                                             struct device_attribute *attr,
-                                             const char *buf, size_t count)
-{
-       struct bonding *bond = to_bond(d);
-       int ret;
-
-       ret = bond_opt_tryset_rtnl(bond, BOND_OPT_PRIMARY_RESELECT,
-                                  (char *)buf);
-       if (!ret)
-               ret = count;
-
-       return ret;
-}
 static DEVICE_ATTR(primary_reselect, S_IRUGO | S_IWUSR,
-                  bonding_show_primary_reselect,
-                  bonding_store_primary_reselect);
+                  bonding_show_primary_reselect, bonding_sysfs_store_option);
 
-/*
- * Show and set the use_carrier flag.
- */
+/* Show the use_carrier flag. */
 static ssize_t bonding_show_carrier(struct device *d,
                                    struct device_attribute *attr,
                                    char *buf)
@@ -724,27 +462,11 @@ static ssize_t bonding_show_carrier(struct device *d,
 
        return sprintf(buf, "%d\n", bond->params.use_carrier);
 }
-
-static ssize_t bonding_store_carrier(struct device *d,
-                                    struct device_attribute *attr,
-                                    const char *buf, size_t count)
-{
-       struct bonding *bond = to_bond(d);
-       int ret;
-
-       ret = bond_opt_tryset_rtnl(bond, BOND_OPT_USE_CARRIER, (char *)buf);
-       if (!ret)
-               ret = count;
-
-       return ret;
-}
 static DEVICE_ATTR(use_carrier, S_IRUGO | S_IWUSR,
-                  bonding_show_carrier, bonding_store_carrier);
+                  bonding_show_carrier, bonding_sysfs_store_option);
 
 
-/*
- * Show and set currently active_slave.
- */
+/* Show currently active_slave. */
 static ssize_t bonding_show_active_slave(struct device *d,
                                         struct device_attribute *attr,
                                         char *buf)
@@ -761,27 +483,10 @@ static ssize_t bonding_show_active_slave(struct device *d,
 
        return count;
 }
-
-static ssize_t bonding_store_active_slave(struct device *d,
-                                         struct device_attribute *attr,
-                                         const char *buf, size_t count)
-{
-       struct bonding *bond = to_bond(d);
-       int ret;
-
-       ret = bond_opt_tryset_rtnl(bond, BOND_OPT_ACTIVE_SLAVE, (char *)buf);
-       if (!ret)
-               ret = count;
-
-       return ret;
-}
 static DEVICE_ATTR(active_slave, S_IRUGO | S_IWUSR,
-                  bonding_show_active_slave, bonding_store_active_slave);
-
+                  bonding_show_active_slave, bonding_sysfs_store_option);
 
-/*
- * Show link status of the bond interface.
- */
+/* Show link status of the bond interface. */
 static ssize_t bonding_show_mii_status(struct device *d,
                                       struct device_attribute *attr,
                                       char *buf)
@@ -792,9 +497,7 @@ static ssize_t bonding_show_mii_status(struct device *d,
 }
 static DEVICE_ATTR(mii_status, S_IRUGO, bonding_show_mii_status, NULL);
 
-/*
- * Show current 802.3ad aggregator ID.
- */
+/* Show current 802.3ad aggregator ID. */
 static ssize_t bonding_show_ad_aggregator(struct device *d,
                                          struct device_attribute *attr,
                                          char *buf)
@@ -802,7 +505,7 @@ static ssize_t bonding_show_ad_aggregator(struct device *d,
        int count = 0;
        struct bonding *bond = to_bond(d);
 
-       if (bond->params.mode == BOND_MODE_8023AD) {
+       if (BOND_MODE(bond) == BOND_MODE_8023AD) {
                struct ad_info ad_info;
                count = sprintf(buf, "%d\n",
                                bond_3ad_get_active_agg_info(bond, &ad_info)
@@ -814,9 +517,7 @@ static ssize_t bonding_show_ad_aggregator(struct device *d,
 static DEVICE_ATTR(ad_aggregator, S_IRUGO, bonding_show_ad_aggregator, NULL);
 
 
-/*
- * Show number of active 802.3ad ports.
- */
+/* Show number of active 802.3ad ports. */
 static ssize_t bonding_show_ad_num_ports(struct device *d,
                                         struct device_attribute *attr,
                                         char *buf)
@@ -824,7 +525,7 @@ static ssize_t bonding_show_ad_num_ports(struct device *d,
        int count = 0;
        struct bonding *bond = to_bond(d);
 
-       if (bond->params.mode == BOND_MODE_8023AD) {
+       if (BOND_MODE(bond) == BOND_MODE_8023AD) {
                struct ad_info ad_info;
                count = sprintf(buf, "%d\n",
                                bond_3ad_get_active_agg_info(bond, &ad_info)
@@ -836,9 +537,7 @@ static ssize_t bonding_show_ad_num_ports(struct device *d,
 static DEVICE_ATTR(ad_num_ports, S_IRUGO, bonding_show_ad_num_ports, NULL);
 
 
-/*
- * Show current 802.3ad actor key.
- */
+/* Show current 802.3ad actor key. */
 static ssize_t bonding_show_ad_actor_key(struct device *d,
                                         struct device_attribute *attr,
                                         char *buf)
@@ -846,7 +545,7 @@ static ssize_t bonding_show_ad_actor_key(struct device *d,
        int count = 0;
        struct bonding *bond = to_bond(d);
 
-       if (bond->params.mode == BOND_MODE_8023AD) {
+       if (BOND_MODE(bond) == BOND_MODE_8023AD) {
                struct ad_info ad_info;
                count = sprintf(buf, "%d\n",
                                bond_3ad_get_active_agg_info(bond, &ad_info)
@@ -858,9 +557,7 @@ static ssize_t bonding_show_ad_actor_key(struct device *d,
 static DEVICE_ATTR(ad_actor_key, S_IRUGO, bonding_show_ad_actor_key, NULL);
 
 
-/*
- * Show current 802.3ad partner key.
- */
+/* Show current 802.3ad partner key. */
 static ssize_t bonding_show_ad_partner_key(struct device *d,
                                           struct device_attribute *attr,
                                           char *buf)
@@ -868,7 +565,7 @@ static ssize_t bonding_show_ad_partner_key(struct device *d,
        int count = 0;
        struct bonding *bond = to_bond(d);
 
-       if (bond->params.mode == BOND_MODE_8023AD) {
+       if (BOND_MODE(bond) == BOND_MODE_8023AD) {
                struct ad_info ad_info;
                count = sprintf(buf, "%d\n",
                                bond_3ad_get_active_agg_info(bond, &ad_info)
@@ -880,9 +577,7 @@ static ssize_t bonding_show_ad_partner_key(struct device *d,
 static DEVICE_ATTR(ad_partner_key, S_IRUGO, bonding_show_ad_partner_key, NULL);
 
 
-/*
- * Show current 802.3ad partner mac.
- */
+/* Show current 802.3ad partner mac. */
 static ssize_t bonding_show_ad_partner_mac(struct device *d,
                                           struct device_attribute *attr,
                                           char *buf)
@@ -890,7 +585,7 @@ static ssize_t bonding_show_ad_partner_mac(struct device *d,
        int count = 0;
        struct bonding *bond = to_bond(d);
 
-       if (bond->params.mode == BOND_MODE_8023AD) {
+       if (BOND_MODE(bond) == BOND_MODE_8023AD) {
                struct ad_info ad_info;
                if (!bond_3ad_get_active_agg_info(bond, &ad_info))
                        count = sprintf(buf, "%pM\n", ad_info.partner_system);
@@ -900,9 +595,7 @@ static ssize_t bonding_show_ad_partner_mac(struct device *d,
 }
 static DEVICE_ATTR(ad_partner_mac, S_IRUGO, bonding_show_ad_partner_mac, NULL);
 
-/*
- * Show the queue_ids of the slaves in the current bond.
- */
+/* Show the queue_ids of the slaves in the current bond. */
 static ssize_t bonding_show_queue_id(struct device *d,
                                     struct device_attribute *attr,
                                     char *buf)
@@ -933,31 +626,11 @@ static ssize_t bonding_show_queue_id(struct device *d,
 
        return res;
 }
-
-/*
- * Set the queue_ids of the  slaves in the current bond.  The bond
- * interface must be enslaved for this to work.
- */
-static ssize_t bonding_store_queue_id(struct device *d,
-                                     struct device_attribute *attr,
-                                     const char *buffer, size_t count)
-{
-       struct bonding *bond = to_bond(d);
-       int ret;
-
-       ret = bond_opt_tryset_rtnl(bond, BOND_OPT_QUEUE_ID, (char *)buffer);
-       if (!ret)
-               ret = count;
-
-       return ret;
-}
 static DEVICE_ATTR(queue_id, S_IRUGO | S_IWUSR, bonding_show_queue_id,
-                  bonding_store_queue_id);
+                  bonding_sysfs_store_option);
 
 
-/*
- * Show and set the all_slaves_active flag.
- */
+/* Show the all_slaves_active flag. */
 static ssize_t bonding_show_slaves_active(struct device *d,
                                          struct device_attribute *attr,
                                          char *buf)
@@ -966,27 +639,10 @@ static ssize_t bonding_show_slaves_active(struct device *d,
 
        return sprintf(buf, "%d\n", bond->params.all_slaves_active);
 }
-
-static ssize_t bonding_store_slaves_active(struct device *d,
-                                          struct device_attribute *attr,
-                                          const char *buf, size_t count)
-{
-       struct bonding *bond = to_bond(d);
-       int ret;
-
-       ret = bond_opt_tryset_rtnl(bond, BOND_OPT_ALL_SLAVES_ACTIVE,
-                                  (char *)buf);
-       if (!ret)
-               ret = count;
-
-       return ret;
-}
 static DEVICE_ATTR(all_slaves_active, S_IRUGO | S_IWUSR,
-                  bonding_show_slaves_active, bonding_store_slaves_active);
+                  bonding_show_slaves_active, bonding_sysfs_store_option);
 
-/*
- * Show and set the number of IGMP membership reports to send on link failure
- */
+/* Show the number of IGMP membership reports to send on link failure */
 static ssize_t bonding_show_resend_igmp(struct device *d,
                                        struct device_attribute *attr,
                                        char *buf)
@@ -995,23 +651,8 @@ static ssize_t bonding_show_resend_igmp(struct device *d,
 
        return sprintf(buf, "%d\n", bond->params.resend_igmp);
 }
-
-static ssize_t bonding_store_resend_igmp(struct device *d,
-                                        struct device_attribute *attr,
-                                        const char *buf, size_t count)
-{
-       struct bonding *bond = to_bond(d);
-       int ret;
-
-       ret = bond_opt_tryset_rtnl(bond, BOND_OPT_RESEND_IGMP, (char *)buf);
-       if (!ret)
-               ret = count;
-
-       return ret;
-}
-
 static DEVICE_ATTR(resend_igmp, S_IRUGO | S_IWUSR,
-                  bonding_show_resend_igmp, bonding_store_resend_igmp);
+                  bonding_show_resend_igmp, bonding_sysfs_store_option);
 
 
 static ssize_t bonding_show_lp_interval(struct device *d,
@@ -1019,25 +660,21 @@ static ssize_t bonding_show_lp_interval(struct device *d,
                                        char *buf)
 {
        struct bonding *bond = to_bond(d);
+
        return sprintf(buf, "%d\n", bond->params.lp_interval);
 }
+static DEVICE_ATTR(lp_interval, S_IRUGO | S_IWUSR,
+                  bonding_show_lp_interval, bonding_sysfs_store_option);
 
-static ssize_t bonding_store_lp_interval(struct device *d,
-                                        struct device_attribute *attr,
-                                        const char *buf, size_t count)
+static ssize_t bonding_show_tlb_dynamic_lb(struct device *d,
+                                          struct device_attribute *attr,
+                                          char *buf)
 {
        struct bonding *bond = to_bond(d);
-       int ret;
-
-       ret = bond_opt_tryset_rtnl(bond, BOND_OPT_LP_INTERVAL, (char *)buf);
-       if (!ret)
-               ret = count;
-
-       return ret;
+       return sprintf(buf, "%d\n", bond->params.tlb_dynamic_lb);
 }
-
-static DEVICE_ATTR(lp_interval, S_IRUGO | S_IWUSR,
-                  bonding_show_lp_interval, bonding_store_lp_interval);
+static DEVICE_ATTR(tlb_dynamic_lb, S_IRUGO | S_IWUSR,
+                  bonding_show_tlb_dynamic_lb, bonding_sysfs_store_option);
 
 static ssize_t bonding_show_packets_per_slave(struct device *d,
                                              struct device_attribute *attr,
@@ -1045,27 +682,11 @@ static ssize_t bonding_show_packets_per_slave(struct device *d,
 {
        struct bonding *bond = to_bond(d);
        unsigned int packets_per_slave = bond->params.packets_per_slave;
-       return sprintf(buf, "%u\n", packets_per_slave);
-}
-
-static ssize_t bonding_store_packets_per_slave(struct device *d,
-                                              struct device_attribute *attr,
-                                              const char *buf, size_t count)
-{
-       struct bonding *bond = to_bond(d);
-       int ret;
 
-       ret = bond_opt_tryset_rtnl(bond, BOND_OPT_PACKETS_PER_SLAVE,
-                                  (char *)buf);
-       if (!ret)
-               ret = count;
-
-       return ret;
+       return sprintf(buf, "%u\n", packets_per_slave);
 }
-
 static DEVICE_ATTR(packets_per_slave, S_IRUGO | S_IWUSR,
-                  bonding_show_packets_per_slave,
-                  bonding_store_packets_per_slave);
+                  bonding_show_packets_per_slave, bonding_sysfs_store_option);
 
 static struct attribute *per_bond_attrs[] = {
        &dev_attr_slaves.attr,
@@ -1099,6 +720,7 @@ static struct attribute *per_bond_attrs[] = {
        &dev_attr_min_links.attr,
        &dev_attr_lp_interval.attr,
        &dev_attr_packets_per_slave.attr,
+       &dev_attr_tlb_dynamic_lb.attr,
        NULL,
 };
 
@@ -1107,8 +729,7 @@ static struct attribute_group bonding_group = {
        .attrs = per_bond_attrs,
 };
 
-/*
- * Initialize sysfs.  This sets up the bonding_masters file in
+/* Initialize sysfs.  This sets up the bonding_masters file in
  * /sys/class/net.
  */
 int bond_create_sysfs(struct bond_net *bn)
@@ -1120,8 +741,7 @@ int bond_create_sysfs(struct bond_net *bn)
 
        ret = netdev_class_create_file_ns(&bn->class_attr_bonding_masters,
                                          bn->net);
-       /*
-        * Permit multiple loads of the module by ignoring failures to
+       /* Permit multiple loads of the module by ignoring failures to
         * create the bonding_masters sysfs file.  Bonding devices
         * created by second or subsequent loads of the module will
         * not be listed in, or controllable by, bonding_masters, but
@@ -1144,16 +764,13 @@ int bond_create_sysfs(struct bond_net *bn)
 
 }
 
-/*
- * Remove /sys/class/net/bonding_masters.
- */
+/* Remove /sys/class/net/bonding_masters. */
 void bond_destroy_sysfs(struct bond_net *bn)
 {
        netdev_class_remove_file_ns(&bn->class_attr_bonding_masters, bn->net);
 }
 
-/*
- * Initialize sysfs for each bond.  This sets up and registers
+/* Initialize sysfs for each bond.  This sets up and registers
  * the 'bondctl' directory for each individual bond under /sys/class/net.
  */
 void bond_prepare_sysfs_group(struct bonding *bond)
index 2e4eec5450c80b726a7bbf1a47ae169cdd18db4f..198677f58ce0af4134b2491e90e6774fa2ae17b4 100644 (file)
@@ -69,8 +69,8 @@ static ssize_t ad_aggregator_id_show(struct slave *slave, char *buf)
 {
        const struct aggregator *agg;
 
-       if (slave->bond->params.mode == BOND_MODE_8023AD) {
-               agg = SLAVE_AD_INFO(slave).port.aggregator;
+       if (BOND_MODE(slave->bond) == BOND_MODE_8023AD) {
+               agg = SLAVE_AD_INFO(slave)->port.aggregator;
                if (agg)
                        return sprintf(buf, "%d\n",
                                       agg->aggregator_identifier);
index b8bdd0acc8f334ac97bca2ddfea602f473c3272f..dfc37797df41ff9212bd3ca6f002d63fceb72d38 100644 (file)
 
 #define BOND_DEFAULT_MIIMON    100
 
-#define IS_UP(dev)                                        \
-             ((((dev)->flags & IFF_UP) == IFF_UP)      && \
-              netif_running(dev)                       && \
-              netif_carrier_ok(dev))
-
-/*
- * Checks whether slave is ready for transmit.
- */
-#define SLAVE_IS_OK(slave)                             \
-                   (((slave)->dev->flags & IFF_UP)  && \
-                    netif_running((slave)->dev)     && \
-                    ((slave)->link == BOND_LINK_UP) && \
-                    bond_is_active_slave(slave))
-
-
-#define USES_PRIMARY(mode)                             \
-               (((mode) == BOND_MODE_ACTIVEBACKUP) ||  \
-                ((mode) == BOND_MODE_TLB)          ||  \
-                ((mode) == BOND_MODE_ALB))
-
-#define BOND_NO_USES_ARP(mode)                         \
-               (((mode) == BOND_MODE_8023AD)   ||      \
-                ((mode) == BOND_MODE_TLB)      ||      \
-                ((mode) == BOND_MODE_ALB))
-
-#define TX_QUEUE_OVERRIDE(mode)                                \
-                       (((mode) == BOND_MODE_ACTIVEBACKUP) ||  \
-                        ((mode) == BOND_MODE_ROUNDROBIN))
-
-#define BOND_MODE_IS_LB(mode)                  \
-               (((mode) == BOND_MODE_TLB) ||   \
-                ((mode) == BOND_MODE_ALB))
-
-#define IS_IP_TARGET_UNUSABLE_ADDRESS(a)       \
-       ((htonl(INADDR_BROADCAST) == a) ||      \
-        ipv4_is_zeronet(a))
 /*
  * Less bad way to call ioctl from within the kernel; this needs to be
  * done some other way to get the call out of interrupt context.
@@ -89,6 +53,8 @@
        set_fs(fs);                     \
        res; })
 
+#define BOND_MODE(bond) ((bond)->params.mode)
+
 /* slave list primitives */
 #define bond_slave_list(bond) (&(bond)->dev->adj_list.lower)
 
@@ -174,6 +140,7 @@ struct bond_params {
        int resend_igmp;
        int lp_interval;
        int packets_per_slave;
+       int tlb_dynamic_lb;
        struct reciprocal_value reciprocal_packets_per_slave;
 };
 
@@ -182,8 +149,6 @@ struct bond_parm_tbl {
        int mode;
 };
 
-#define BOND_MAX_MODENAME_LEN 20
-
 struct slave {
        struct net_device *dev; /* first - useful for panic debug */
        struct bonding *bond; /* our master */
@@ -204,7 +169,7 @@ struct slave {
        u32    speed;
        u16    queue_id;
        u8     perm_hwaddr[ETH_ALEN];
-       struct ad_slave_info ad_info; /* HUGE - better to dynamically alloc */
+       struct ad_slave_info *ad_info;
        struct tlb_slave_info tlb_info;
 #ifdef CONFIG_NET_POLL_CONTROLLER
        struct netpoll *np;
@@ -284,14 +249,41 @@ static inline struct slave *bond_get_slave_by_dev(struct bonding *bond,
 
 static inline struct bonding *bond_get_bond_by_slave(struct slave *slave)
 {
-       if (!slave || !slave->bond)
-               return NULL;
        return slave->bond;
 }
 
+static inline bool bond_should_override_tx_queue(struct bonding *bond)
+{
+       return BOND_MODE(bond) == BOND_MODE_ACTIVEBACKUP ||
+              BOND_MODE(bond) == BOND_MODE_ROUNDROBIN;
+}
+
 static inline bool bond_is_lb(const struct bonding *bond)
 {
-       return BOND_MODE_IS_LB(bond->params.mode);
+       return BOND_MODE(bond) == BOND_MODE_TLB ||
+              BOND_MODE(bond) == BOND_MODE_ALB;
+}
+
+static inline bool bond_mode_uses_arp(int mode)
+{
+       return mode != BOND_MODE_8023AD && mode != BOND_MODE_TLB &&
+              mode != BOND_MODE_ALB;
+}
+
+static inline bool bond_mode_uses_primary(int mode)
+{
+       return mode == BOND_MODE_ACTIVEBACKUP || mode == BOND_MODE_TLB ||
+              mode == BOND_MODE_ALB;
+}
+
+static inline bool bond_uses_primary(struct bonding *bond)
+{
+       return bond_mode_uses_primary(BOND_MODE(bond));
+}
+
+static inline bool bond_slave_is_up(struct slave *slave)
+{
+       return netif_running(slave->dev) && netif_carrier_ok(slave->dev);
 }
 
 static inline void bond_set_active_slave(struct slave *slave)
@@ -364,6 +356,12 @@ static inline bool bond_is_active_slave(struct slave *slave)
        return !bond_slave_state(slave);
 }
 
+static inline bool bond_slave_can_tx(struct slave *slave)
+{
+       return bond_slave_is_up(slave) && slave->link == BOND_LINK_UP &&
+              bond_is_active_slave(slave);
+}
+
 #define BOND_PRI_RESELECT_ALWAYS       0
 #define BOND_PRI_RESELECT_BETTER       1
 #define BOND_PRI_RESELECT_FAILURE      2
@@ -395,12 +393,16 @@ static inline int slave_do_arp_validate(struct bonding *bond,
        return bond->params.arp_validate & (1 << bond_slave_state(slave));
 }
 
-static inline int slave_do_arp_validate_only(struct bonding *bond,
-                                            struct slave *slave)
+static inline int slave_do_arp_validate_only(struct bonding *bond)
 {
        return bond->params.arp_validate & BOND_ARP_FILTER;
 }
 
+static inline int bond_is_ip_target_ok(__be32 addr)
+{
+       return !ipv4_is_lbcast(addr) && !ipv4_is_zeronet(addr);
+}
+
 /* Get the oldest arp which we've received on this slave for bond's
  * arp_targets.
  */
@@ -478,16 +480,14 @@ static inline __be32 bond_confirm_addr(struct net_device *dev, __be32 dst, __be3
        return addr;
 }
 
-static inline bool slave_can_tx(struct slave *slave)
-{
-       if (IS_UP(slave->dev) && slave->link == BOND_LINK_UP &&
-           bond_is_active_slave(slave))
-               return true;
-       else
-               return false;
-}
-
-struct bond_net;
+struct bond_net {
+       struct net              *net;   /* Associated network namespace */
+       struct list_head        dev_list;
+#ifdef CONFIG_PROC_FS
+       struct proc_dir_entry   *proc_dir;
+#endif
+       struct class_attribute  class_attr_bonding_masters;
+};
 
 int bond_arp_rcv(const struct sk_buff *skb, struct bonding *bond, struct slave *slave);
 void bond_dev_queue_xmit(struct bonding *bond, struct sk_buff *skb, struct net_device *slave_dev);
@@ -499,7 +499,7 @@ int bond_sysfs_slave_add(struct slave *slave);
 void bond_sysfs_slave_del(struct slave *slave);
 int bond_enslave(struct net_device *bond_dev, struct net_device *slave_dev);
 int bond_release(struct net_device *bond_dev, struct net_device *slave_dev);
-int bond_xmit_hash(struct bonding *bond, struct sk_buff *skb, int count);
+u32 bond_xmit_hash(struct bonding *bond, struct sk_buff *skb);
 void bond_select_active_slave(struct bonding *bond);
 void bond_change_active_slave(struct bonding *bond, struct slave *new_active);
 void bond_create_debugfs(void);
@@ -516,15 +516,6 @@ struct net_device *bond_option_active_slave_get_rcu(struct bonding *bond);
 struct net_device *bond_option_active_slave_get(struct bonding *bond);
 const char *bond_slave_link_status(s8 link);
 
-struct bond_net {
-       struct net *            net;    /* Associated network namespace */
-       struct list_head        dev_list;
-#ifdef CONFIG_PROC_FS
-       struct proc_dir_entry * proc_dir;
-#endif
-       struct class_attribute  class_attr_bonding_masters;
-};
-
 #ifdef CONFIG_PROC_FS
 void bond_create_proc_entry(struct bonding *bond);
 void bond_remove_proc_entry(struct bonding *bond);
index 9e7d95dae2c7038478d6efadddba81e2778f47a9..714b18790cafa945410fe5c05707ca990b592948 100644 (file)
@@ -65,7 +65,7 @@ config CAN_LEDS
 
 config CAN_AT91
        tristate "Atmel AT91 onchip CAN controller"
-       depends on ARM
+       depends on ARCH_AT91 || COMPILE_TEST
        ---help---
          This is a driver for the SoC CAN controller in Atmel's AT91SAM9263
          and AT91SAM9X5 processors.
@@ -77,12 +77,6 @@ config CAN_TI_HECC
          Driver for TI HECC (High End CAN Controller) module found on many
          TI devices. The device specifications are available from www.ti.com
 
-config CAN_MCP251X
-       tristate "Microchip MCP251x SPI CAN controllers"
-       depends on SPI && HAS_DMA
-       ---help---
-         Driver for the Microchip MCP251x SPI CAN controllers.
-
 config CAN_BFIN
        depends on BF534 || BF536 || BF537 || BF538 || BF539 || BF54x
        tristate "Analog Devices Blackfin on-chip CAN"
@@ -110,7 +104,7 @@ config CAN_FLEXCAN
 
 config PCH_CAN
        tristate "Intel EG20T PCH CAN controller"
-       depends on PCI
+       depends on PCI && (X86_32 || COMPILE_TEST)
        ---help---
          This driver is for PCH CAN of Topcliff (Intel EG20T PCH) which
          is an IOH for x86 embedded processor (Intel Atom E6xx series).
@@ -125,6 +119,16 @@ config CAN_GRCAN
          endian syntheses of the cores would need some modifications on
          the hardware level to work.
 
+config CAN_RCAR
+       tristate "Renesas R-Car CAN controller"
+       depends on ARM
+       ---help---
+         Say Y here if you want to use CAN controller found on Renesas R-Car
+         SoCs.
+
+         To compile this driver as a module, choose M here: the module will
+         be called rcar_can.
+
 source "drivers/net/can/mscan/Kconfig"
 
 source "drivers/net/can/sja1000/Kconfig"
@@ -133,6 +137,8 @@ source "drivers/net/can/c_can/Kconfig"
 
 source "drivers/net/can/cc770/Kconfig"
 
+source "drivers/net/can/spi/Kconfig"
+
 source "drivers/net/can/usb/Kconfig"
 
 source "drivers/net/can/softing/Kconfig"
index c7440392adbbaaabd5ca5b8ba872ba18c23f41d2..90f538c73f8c4ac76f793ff810bc4a476de327e8 100644 (file)
@@ -10,6 +10,7 @@ can-dev-y                     := dev.o
 
 can-dev-$(CONFIG_CAN_LEDS)     += led.o
 
+obj-y                          += spi/
 obj-y                          += usb/
 obj-y                          += softing/
 
@@ -19,11 +20,11 @@ obj-$(CONFIG_CAN_C_CAN)             += c_can/
 obj-$(CONFIG_CAN_CC770)                += cc770/
 obj-$(CONFIG_CAN_AT91)         += at91_can.o
 obj-$(CONFIG_CAN_TI_HECC)      += ti_hecc.o
-obj-$(CONFIG_CAN_MCP251X)      += mcp251x.o
 obj-$(CONFIG_CAN_BFIN)         += bfin_can.o
 obj-$(CONFIG_CAN_JANZ_ICAN3)   += janz-ican3.o
 obj-$(CONFIG_CAN_FLEXCAN)      += flexcan.o
 obj-$(CONFIG_PCH_CAN)          += pch_can.o
 obj-$(CONFIG_CAN_GRCAN)                += grcan.o
+obj-$(CONFIG_CAN_RCAR)         += rcar_can.o
 
 ccflags-$(CONFIG_CAN_DEBUG_DEVICES) := -DDEBUG
index 61ffc12d8fd8e4e01056b06fd3ae73be7abe513e..8ab7103d4f44ea616ae8cb945eda8ea84aa3059b 100644 (file)
@@ -14,6 +14,13 @@ config CAN_C_CAN_PLATFORM
          SPEAr1310 and SPEAr320 evaluation boards & TI (www.ti.com)
          boards like am335x, dm814x, dm813x and dm811x.
 
+config CAN_C_CAN_STRICT_FRAME_ORDERING
+       bool "Force a strict RX CAN frame order (may cause frame loss)"
+       ---help---
+         The RX split buffer prevents packet reordering but can cause packet
+         loss. Only enable this option when you accept to lose CAN frames
+         in favour of getting the received CAN frames in the correct order.
+
 config CAN_C_CAN_PCI
        tristate "Generic PCI Bus based C_CAN/D_CAN driver"
        depends on PCI
index a5c8dcfa83579376a36105c8c055e63f792c42ad..e154b4cb0f1a3637a3838215c6d8716a4a658aa0 100644 (file)
@@ -60,6 +60,8 @@
 #define CONTROL_IE             BIT(1)
 #define CONTROL_INIT           BIT(0)
 
+#define CONTROL_IRQMSK         (CONTROL_EIE | CONTROL_IE | CONTROL_SIE)
+
 /* test register */
 #define TEST_RX                        BIT(7)
 #define TEST_TX1               BIT(6)
 #define IF_COMM_CONTROL                BIT(4)
 #define IF_COMM_CLR_INT_PND    BIT(3)
 #define IF_COMM_TXRQST         BIT(2)
+#define IF_COMM_CLR_NEWDAT     IF_COMM_TXRQST
 #define IF_COMM_DATAA          BIT(1)
 #define IF_COMM_DATAB          BIT(0)
-#define IF_COMM_ALL            (IF_COMM_MASK | IF_COMM_ARB | \
-                               IF_COMM_CONTROL | IF_COMM_TXRQST | \
-                               IF_COMM_DATAA | IF_COMM_DATAB)
+
+/* TX buffer setup */
+#define IF_COMM_TX             (IF_COMM_ARB | IF_COMM_CONTROL | \
+                                IF_COMM_TXRQST |                \
+                                IF_COMM_DATAA | IF_COMM_DATAB)
 
 /* For the low buffers we clear the interrupt bit, but keep newdat */
 #define IF_COMM_RCV_LOW                (IF_COMM_MASK | IF_COMM_ARB | \
                                 IF_COMM_DATAA | IF_COMM_DATAB)
 
 /* For the high buffers we clear the interrupt bit and newdat */
-#define IF_COMM_RCV_HIGH       (IF_COMM_RCV_LOW | IF_COMM_TXRQST)
+#define IF_COMM_RCV_HIGH       (IF_COMM_RCV_LOW | IF_COMM_CLR_NEWDAT)
+
+
+/* Receive setup of message objects */
+#define IF_COMM_RCV_SETUP      (IF_COMM_MASK | IF_COMM_ARB | IF_COMM_CONTROL)
+
+/* Invalidation of message objects */
+#define IF_COMM_INVAL          (IF_COMM_ARB | IF_COMM_CONTROL)
 
 /* IFx arbitration */
-#define IF_ARB_MSGVAL          BIT(15)
-#define IF_ARB_MSGXTD          BIT(14)
-#define IF_ARB_TRANSMIT                BIT(13)
+#define IF_ARB_MSGVAL          BIT(31)
+#define IF_ARB_MSGXTD          BIT(30)
+#define IF_ARB_TRANSMIT                BIT(29)
 
 /* IFx message control */
 #define IF_MCONT_NEWDAT                BIT(15)
 #define IF_MCONT_EOB           BIT(7)
 #define IF_MCONT_DLC_MASK      0xf
 
+#define IF_MCONT_RCV           (IF_MCONT_RXIE | IF_MCONT_UMASK)
+#define IF_MCONT_RCV_EOB       (IF_MCONT_RCV | IF_MCONT_EOB)
+
+#define IF_MCONT_TX            (IF_MCONT_TXIE | IF_MCONT_EOB)
+
 /*
  * Use IF1 for RX and IF2 for TX
  */
 #define IF_RX                  0
 #define IF_TX                  1
 
-/* status interrupt */
-#define STATUS_INTERRUPT       0x8000
-
-/* global interrupt masks */
-#define ENABLE_ALL_INTERRUPTS  1
-#define DISABLE_ALL_INTERRUPTS 0
-
 /* minimum timeout for checking BUSY status */
 #define MIN_TIMEOUT_VALUE      6
 
@@ -171,6 +181,7 @@ enum c_can_lec_type {
        LEC_BIT0_ERROR,
        LEC_CRC_ERROR,
        LEC_UNUSED,
+       LEC_MASK = LEC_UNUSED,
 };
 
 /*
@@ -226,143 +237,113 @@ static inline void c_can_reset_ram(const struct c_can_priv *priv, bool enable)
                priv->raminit(priv, enable);
 }
 
-static inline int get_tx_next_msg_obj(const struct c_can_priv *priv)
+static void c_can_irq_control(struct c_can_priv *priv, bool enable)
 {
-       return (priv->tx_next & C_CAN_NEXT_MSG_OBJ_MASK) +
-                       C_CAN_MSG_OBJ_TX_FIRST;
-}
-
-static inline int get_tx_echo_msg_obj(int txecho)
-{
-       return (txecho & C_CAN_NEXT_MSG_OBJ_MASK) + C_CAN_MSG_OBJ_TX_FIRST;
-}
-
-static u32 c_can_read_reg32(struct c_can_priv *priv, enum reg index)
-{
-       u32 val = priv->read_reg(priv, index);
-       val |= ((u32) priv->read_reg(priv, index + 1)) << 16;
-       return val;
-}
-
-static void c_can_enable_all_interrupts(struct c_can_priv *priv,
-                                               int enable)
-{
-       unsigned int cntrl_save = priv->read_reg(priv,
-                                               C_CAN_CTRL_REG);
+       u32 ctrl = priv->read_reg(priv, C_CAN_CTRL_REG) & ~CONTROL_IRQMSK;
 
        if (enable)
-               cntrl_save |= (CONTROL_SIE | CONTROL_EIE | CONTROL_IE);
-       else
-               cntrl_save &= ~(CONTROL_EIE | CONTROL_IE | CONTROL_SIE);
+               ctrl |= CONTROL_IRQMSK;
 
-       priv->write_reg(priv, C_CAN_CTRL_REG, cntrl_save);
+       priv->write_reg(priv, C_CAN_CTRL_REG, ctrl);
 }
 
-static inline int c_can_msg_obj_is_busy(struct c_can_priv *priv, int iface)
+static void c_can_obj_update(struct net_device *dev, int iface, u32 cmd, u32 obj)
 {
-       int count = MIN_TIMEOUT_VALUE;
+       struct c_can_priv *priv = netdev_priv(dev);
+       int cnt, reg = C_CAN_IFACE(COMREQ_REG, iface);
+
+       priv->write_reg32(priv, reg, (cmd << 16) | obj);
 
-       while (count && priv->read_reg(priv,
-                               C_CAN_IFACE(COMREQ_REG, iface)) &
-                               IF_COMR_BUSY) {
-               count--;
+       for (cnt = MIN_TIMEOUT_VALUE; cnt; cnt--) {
+               if (!(priv->read_reg(priv, reg) & IF_COMR_BUSY))
+                       return;
                udelay(1);
        }
+       netdev_err(dev, "Updating object timed out\n");
 
-       if (!count)
-               return 1;
+}
 
-       return 0;
+static inline void c_can_object_get(struct net_device *dev, int iface,
+                                   u32 obj, u32 cmd)
+{
+       c_can_obj_update(dev, iface, cmd, obj);
 }
 
-static inline void c_can_object_get(struct net_device *dev,
-                                       int iface, int objno, int mask)
+static inline void c_can_object_put(struct net_device *dev, int iface,
+                                   u32 obj, u32 cmd)
 {
-       struct c_can_priv *priv = netdev_priv(dev);
+       c_can_obj_update(dev, iface, cmd | IF_COMM_WR, obj);
+}
 
-       /*
-        * As per specs, after writting the message object number in the
-        * IF command request register the transfer b/w interface
-        * register and message RAM must be complete in 6 CAN-CLK
-        * period.
-        */
-       priv->write_reg(priv, C_CAN_IFACE(COMMSK_REG, iface),
-                       IFX_WRITE_LOW_16BIT(mask));
-       priv->write_reg(priv, C_CAN_IFACE(COMREQ_REG, iface),
-                       IFX_WRITE_LOW_16BIT(objno));
+/*
+ * Note: According to documentation clearing TXIE while MSGVAL is set
+ * is not allowed, but works nicely on C/DCAN. And that lowers the I/O
+ * load significantly.
+ */
+static void c_can_inval_tx_object(struct net_device *dev, int iface, int obj)
+{
+       struct c_can_priv *priv = netdev_priv(dev);
 
-       if (c_can_msg_obj_is_busy(priv, iface))
-               netdev_err(dev, "timed out in object get\n");
+       priv->write_reg(priv, C_CAN_IFACE(MSGCTRL_REG, iface), 0);
+       c_can_object_put(dev, iface, obj, IF_COMM_INVAL);
 }
 
-static inline void c_can_object_put(struct net_device *dev,
-                                       int iface, int objno, int mask)
+static void c_can_inval_msg_object(struct net_device *dev, int iface, int obj)
 {
        struct c_can_priv *priv = netdev_priv(dev);
 
-       /*
-        * As per specs, after writting the message object number in the
-        * IF command request register the transfer b/w interface
-        * register and message RAM must be complete in 6 CAN-CLK
-        * period.
-        */
-       priv->write_reg(priv, C_CAN_IFACE(COMMSK_REG, iface),
-                       (IF_COMM_WR | IFX_WRITE_LOW_16BIT(mask)));
-       priv->write_reg(priv, C_CAN_IFACE(COMREQ_REG, iface),
-                       IFX_WRITE_LOW_16BIT(objno));
-
-       if (c_can_msg_obj_is_busy(priv, iface))
-               netdev_err(dev, "timed out in object put\n");
+       priv->write_reg(priv, C_CAN_IFACE(ARB1_REG, iface), 0);
+       priv->write_reg(priv, C_CAN_IFACE(ARB2_REG, iface), 0);
+       c_can_inval_tx_object(dev, iface, obj);
 }
 
-static void c_can_write_msg_object(struct net_device *dev,
-                       int iface, struct can_frame *frame, int objno)
+static void c_can_setup_tx_object(struct net_device *dev, int iface,
+                                 struct can_frame *frame, int idx)
 {
-       int i;
-       u16 flags = 0;
-       unsigned int id;
        struct c_can_priv *priv = netdev_priv(dev);
-
-       if (!(frame->can_id & CAN_RTR_FLAG))
-               flags |= IF_ARB_TRANSMIT;
+       u16 ctrl = IF_MCONT_TX | frame->can_dlc;
+       bool rtr = frame->can_id & CAN_RTR_FLAG;
+       u32 arb = IF_ARB_MSGVAL;
+       int i;
 
        if (frame->can_id & CAN_EFF_FLAG) {
-               id = frame->can_id & CAN_EFF_MASK;
-               flags |= IF_ARB_MSGXTD;
-       } else
-               id = ((frame->can_id & CAN_SFF_MASK) << 18);
+               arb |= frame->can_id & CAN_EFF_MASK;
+               arb |= IF_ARB_MSGXTD;
+       } else {
+               arb |= (frame->can_id & CAN_SFF_MASK) << 18;
+       }
+
+       if (!rtr)
+               arb |= IF_ARB_TRANSMIT;
 
-       flags |= IF_ARB_MSGVAL;
+       /*
+        * If we change the DIR bit, we need to invalidate the buffer
+        * first, i.e. clear the MSGVAL flag in the arbiter.
+        */
+       if (rtr != (bool)test_bit(idx, &priv->tx_dir)) {
+               u32 obj = idx + C_CAN_MSG_OBJ_TX_FIRST;
+
+               c_can_inval_msg_object(dev, iface, obj);
+               change_bit(idx, &priv->tx_dir);
+       }
+
+       priv->write_reg32(priv, C_CAN_IFACE(ARB1_REG, iface), arb);
 
-       priv->write_reg(priv, C_CAN_IFACE(ARB1_REG, iface),
-                               IFX_WRITE_LOW_16BIT(id));
-       priv->write_reg(priv, C_CAN_IFACE(ARB2_REG, iface), flags |
-                               IFX_WRITE_HIGH_16BIT(id));
+       priv->write_reg(priv, C_CAN_IFACE(MSGCTRL_REG, iface), ctrl);
 
        for (i = 0; i < frame->can_dlc; i += 2) {
                priv->write_reg(priv, C_CAN_IFACE(DATA1_REG, iface) + i / 2,
                                frame->data[i] | (frame->data[i + 1] << 8));
        }
-
-       /* enable interrupt for this message object */
-       priv->write_reg(priv, C_CAN_IFACE(MSGCTRL_REG, iface),
-                       IF_MCONT_TXIE | IF_MCONT_TXRQST | IF_MCONT_EOB |
-                       frame->can_dlc);
-       c_can_object_put(dev, iface, objno, IF_COMM_ALL);
 }
 
 static inline void c_can_activate_all_lower_rx_msg_obj(struct net_device *dev,
-                                               int iface,
-                                               int ctrl_mask)
+                                                      int iface)
 {
        int i;
-       struct c_can_priv *priv = netdev_priv(dev);
 
-       for (i = C_CAN_MSG_OBJ_RX_FIRST; i <= C_CAN_MSG_RX_LOW_LAST; i++) {
-               priv->write_reg(priv, C_CAN_IFACE(MSGCTRL_REG, iface),
-                               ctrl_mask & ~IF_MCONT_NEWDAT);
-               c_can_object_put(dev, iface, i, IF_COMM_CONTROL);
-       }
+       for (i = C_CAN_MSG_OBJ_RX_FIRST; i <= C_CAN_MSG_RX_LOW_LAST; i++)
+               c_can_object_get(dev, iface, i, IF_COMM_CLR_NEWDAT);
 }
 
 static int c_can_handle_lost_msg_obj(struct net_device *dev,
@@ -377,6 +358,9 @@ static int c_can_handle_lost_msg_obj(struct net_device *dev,
        priv->write_reg(priv, C_CAN_IFACE(MSGCTRL_REG, iface), ctrl);
        c_can_object_put(dev, iface, objno, IF_COMM_CONTROL);
 
+       stats->rx_errors++;
+       stats->rx_over_errors++;
+
        /* create an error msg */
        skb = alloc_can_err_skb(dev, &frame);
        if (unlikely(!skb))
@@ -384,22 +368,18 @@ static int c_can_handle_lost_msg_obj(struct net_device *dev,
 
        frame->can_id |= CAN_ERR_CRTL;
        frame->data[1] = CAN_ERR_CRTL_RX_OVERFLOW;
-       stats->rx_errors++;
-       stats->rx_over_errors++;
 
        netif_receive_skb(skb);
        return 1;
 }
 
-static int c_can_read_msg_object(struct net_device *dev, int iface, int ctrl)
+static int c_can_read_msg_object(struct net_device *dev, int iface, u32 ctrl)
 {
-       u16 flags, data;
-       int i;
-       unsigned int val;
-       struct c_can_priv *priv = netdev_priv(dev);
        struct net_device_stats *stats = &dev->stats;
-       struct sk_buff *skb;
+       struct c_can_priv *priv = netdev_priv(dev);
        struct can_frame *frame;
+       struct sk_buff *skb;
+       u32 arb, data;
 
        skb = alloc_can_skb(dev, &frame);
        if (!skb) {
@@ -409,115 +389,79 @@ static int c_can_read_msg_object(struct net_device *dev, int iface, int ctrl)
 
        frame->can_dlc = get_can_dlc(ctrl & 0x0F);
 
-       flags = priv->read_reg(priv, C_CAN_IFACE(ARB2_REG, iface));
-       val = priv->read_reg(priv, C_CAN_IFACE(ARB1_REG, iface)) |
-               (flags << 16);
+       arb = priv->read_reg32(priv, C_CAN_IFACE(ARB1_REG, iface));
 
-       if (flags & IF_ARB_MSGXTD)
-               frame->can_id = (val & CAN_EFF_MASK) | CAN_EFF_FLAG;
+       if (arb & IF_ARB_MSGXTD)
+               frame->can_id = (arb & CAN_EFF_MASK) | CAN_EFF_FLAG;
        else
-               frame->can_id = (val >> 18) & CAN_SFF_MASK;
+               frame->can_id = (arb >> 18) & CAN_SFF_MASK;
 
-       if (flags & IF_ARB_TRANSMIT)
+       if (arb & IF_ARB_TRANSMIT) {
                frame->can_id |= CAN_RTR_FLAG;
-       else {
-               for (i = 0; i < frame->can_dlc; i += 2) {
-                       data = priv->read_reg(priv,
-                               C_CAN_IFACE(DATA1_REG, iface) + i / 2);
+       } else {
+               int i, dreg = C_CAN_IFACE(DATA1_REG, iface);
+
+               for (i = 0; i < frame->can_dlc; i += 2, dreg ++) {
+                       data = priv->read_reg(priv, dreg);
                        frame->data[i] = data;
                        frame->data[i + 1] = data >> 8;
                }
        }
 
-       netif_receive_skb(skb);
-
        stats->rx_packets++;
        stats->rx_bytes += frame->can_dlc;
+
+       netif_receive_skb(skb);
        return 0;
 }
 
 static void c_can_setup_receive_object(struct net_device *dev, int iface,
-                                       int objno, unsigned int mask,
-                                       unsigned int id, unsigned int mcont)
+                                      u32 obj, u32 mask, u32 id, u32 mcont)
 {
        struct c_can_priv *priv = netdev_priv(dev);
 
-       priv->write_reg(priv, C_CAN_IFACE(MASK1_REG, iface),
-                       IFX_WRITE_LOW_16BIT(mask));
-
-       /* According to C_CAN documentation, the reserved bit
-        * in IFx_MASK2 register is fixed 1
-        */
-       priv->write_reg(priv, C_CAN_IFACE(MASK2_REG, iface),
-                       IFX_WRITE_HIGH_16BIT(mask) | BIT(13));
+       mask |= BIT(29);
+       priv->write_reg32(priv, C_CAN_IFACE(MASK1_REG, iface), mask);
 
-       priv->write_reg(priv, C_CAN_IFACE(ARB1_REG, iface),
-                       IFX_WRITE_LOW_16BIT(id));
-       priv->write_reg(priv, C_CAN_IFACE(ARB2_REG, iface),
-                       (IF_ARB_MSGVAL | IFX_WRITE_HIGH_16BIT(id)));
+       id |= IF_ARB_MSGVAL;
+       priv->write_reg32(priv, C_CAN_IFACE(ARB1_REG, iface), id);
 
        priv->write_reg(priv, C_CAN_IFACE(MSGCTRL_REG, iface), mcont);
-       c_can_object_put(dev, iface, objno, IF_COMM_ALL & ~IF_COMM_TXRQST);
-
-       netdev_dbg(dev, "obj no:%d, msgval:0x%08x\n", objno,
-                       c_can_read_reg32(priv, C_CAN_MSGVAL1_REG));
-}
-
-static void c_can_inval_msg_object(struct net_device *dev, int iface, int objno)
-{
-       struct c_can_priv *priv = netdev_priv(dev);
-
-       priv->write_reg(priv, C_CAN_IFACE(ARB1_REG, iface), 0);
-       priv->write_reg(priv, C_CAN_IFACE(ARB2_REG, iface), 0);
-       priv->write_reg(priv, C_CAN_IFACE(MSGCTRL_REG, iface), 0);
-
-       c_can_object_put(dev, iface, objno, IF_COMM_ARB | IF_COMM_CONTROL);
-
-       netdev_dbg(dev, "obj no:%d, msgval:0x%08x\n", objno,
-                       c_can_read_reg32(priv, C_CAN_MSGVAL1_REG));
-}
-
-static inline int c_can_is_next_tx_obj_busy(struct c_can_priv *priv, int objno)
-{
-       int val = c_can_read_reg32(priv, C_CAN_TXRQST1_REG);
-
-       /*
-        * as transmission request register's bit n-1 corresponds to
-        * message object n, we need to handle the same properly.
-        */
-       if (val & (1 << (objno - 1)))
-               return 1;
-
-       return 0;
+       c_can_object_put(dev, iface, obj, IF_COMM_RCV_SETUP);
 }
 
 static netdev_tx_t c_can_start_xmit(struct sk_buff *skb,
-                                       struct net_device *dev)
+                                   struct net_device *dev)
 {
-       u32 msg_obj_no;
-       struct c_can_priv *priv = netdev_priv(dev);
        struct can_frame *frame = (struct can_frame *)skb->data;
+       struct c_can_priv *priv = netdev_priv(dev);
+       u32 idx, obj;
 
        if (can_dropped_invalid_skb(dev, skb))
                return NETDEV_TX_OK;
-
-       spin_lock_bh(&priv->xmit_lock);
-       msg_obj_no = get_tx_next_msg_obj(priv);
-
-       /* prepare message object for transmission */
-       c_can_write_msg_object(dev, IF_TX, frame, msg_obj_no);
-       priv->dlc[msg_obj_no - C_CAN_MSG_OBJ_TX_FIRST] = frame->can_dlc;
-       can_put_echo_skb(skb, dev, msg_obj_no - C_CAN_MSG_OBJ_TX_FIRST);
-
        /*
-        * we have to stop the queue in case of a wrap around or
-        * if the next TX message object is still in use
+        * This is not a FIFO. C/D_CAN sends out the buffers
+        * prioritized. The lowest buffer number wins.
         */
-       priv->tx_next++;
-       if (c_can_is_next_tx_obj_busy(priv, get_tx_next_msg_obj(priv)) ||
-                       (priv->tx_next & C_CAN_NEXT_MSG_OBJ_MASK) == 0)
+       idx = fls(atomic_read(&priv->tx_active));
+       obj = idx + C_CAN_MSG_OBJ_TX_FIRST;
+
+       /* If this is the last buffer, stop the xmit queue */
+       if (idx == C_CAN_MSG_OBJ_TX_NUM - 1)
                netif_stop_queue(dev);
-       spin_unlock_bh(&priv->xmit_lock);
+       /*
+        * Store the message in the interface so we can call
+        * can_put_echo_skb(). We must do this before we enable
+        * transmit as we might race against do_tx().
+        */
+       c_can_setup_tx_object(dev, IF_TX, frame, idx);
+       priv->dlc[idx] = frame->can_dlc;
+       can_put_echo_skb(skb, dev, idx);
+
+       /* Update the active bits */
+       atomic_add((1 << idx), &priv->tx_active);
+       /* Start transmission */
+       c_can_object_put(dev, IF_TX, obj, IF_COMM_TX);
 
        return NETDEV_TX_OK;
 }
@@ -594,11 +538,10 @@ static void c_can_configure_msg_objects(struct net_device *dev)
 
        /* setup receive message objects */
        for (i = C_CAN_MSG_OBJ_RX_FIRST; i < C_CAN_MSG_OBJ_RX_LAST; i++)
-               c_can_setup_receive_object(dev, IF_RX, i, 0, 0,
-                       (IF_MCONT_RXIE | IF_MCONT_UMASK) & ~IF_MCONT_EOB);
+               c_can_setup_receive_object(dev, IF_RX, i, 0, 0, IF_MCONT_RCV);
 
        c_can_setup_receive_object(dev, IF_RX, C_CAN_MSG_OBJ_RX_LAST, 0, 0,
-                       IF_MCONT_EOB | IF_MCONT_RXIE | IF_MCONT_UMASK);
+                                  IF_MCONT_RCV_EOB);
 }
 
 /*
@@ -612,30 +555,22 @@ static int c_can_chip_config(struct net_device *dev)
        struct c_can_priv *priv = netdev_priv(dev);
 
        /* enable automatic retransmission */
-       priv->write_reg(priv, C_CAN_CTRL_REG,
-                       CONTROL_ENABLE_AR);
+       priv->write_reg(priv, C_CAN_CTRL_REG, CONTROL_ENABLE_AR);
 
        if ((priv->can.ctrlmode & CAN_CTRLMODE_LISTENONLY) &&
            (priv->can.ctrlmode & CAN_CTRLMODE_LOOPBACK)) {
                /* loopback + silent mode : useful for hot self-test */
-               priv->write_reg(priv, C_CAN_CTRL_REG, CONTROL_EIE |
-                               CONTROL_SIE | CONTROL_IE | CONTROL_TEST);
-               priv->write_reg(priv, C_CAN_TEST_REG,
-                               TEST_LBACK | TEST_SILENT);
+               priv->write_reg(priv, C_CAN_CTRL_REG, CONTROL_TEST);
+               priv->write_reg(priv, C_CAN_TEST_REG, TEST_LBACK | TEST_SILENT);
        } else if (priv->can.ctrlmode & CAN_CTRLMODE_LOOPBACK) {
                /* loopback mode : useful for self-test function */
-               priv->write_reg(priv, C_CAN_CTRL_REG, CONTROL_EIE |
-                               CONTROL_SIE | CONTROL_IE | CONTROL_TEST);
+               priv->write_reg(priv, C_CAN_CTRL_REG, CONTROL_TEST);
                priv->write_reg(priv, C_CAN_TEST_REG, TEST_LBACK);
        } else if (priv->can.ctrlmode & CAN_CTRLMODE_LISTENONLY) {
                /* silent mode : bus-monitoring mode */
-               priv->write_reg(priv, C_CAN_CTRL_REG, CONTROL_EIE |
-                               CONTROL_SIE | CONTROL_IE | CONTROL_TEST);
+               priv->write_reg(priv, C_CAN_CTRL_REG, CONTROL_TEST);
                priv->write_reg(priv, C_CAN_TEST_REG, TEST_SILENT);
-       } else
-               /* normal mode*/
-               priv->write_reg(priv, C_CAN_CTRL_REG,
-                               CONTROL_EIE | CONTROL_SIE | CONTROL_IE);
+       }
 
        /* configure message objects */
        c_can_configure_msg_objects(dev);
@@ -643,6 +578,11 @@ static int c_can_chip_config(struct net_device *dev)
        /* set a `lec` value so that we can check for updates later */
        priv->write_reg(priv, C_CAN_STS_REG, LEC_UNUSED);
 
+       /* Clear all internal status */
+       atomic_set(&priv->tx_active, 0);
+       priv->rxmasked = 0;
+       priv->tx_dir = 0;
+
        /* set bittiming params */
        return c_can_set_bittiming(dev);
 }
@@ -657,13 +597,11 @@ static int c_can_start(struct net_device *dev)
        if (err)
                return err;
 
-       priv->can.state = CAN_STATE_ERROR_ACTIVE;
-
-       /* reset tx helper pointers */
-       priv->tx_next = priv->tx_echo = 0;
+       /* Setup the command for new messages */
+       priv->comm_rcv_high = priv->type != BOSCH_D_CAN ?
+               IF_COMM_RCV_LOW : IF_COMM_RCV_HIGH;
 
-       /* enable status change, error and module interrupts */
-       c_can_enable_all_interrupts(priv, ENABLE_ALL_INTERRUPTS);
+       priv->can.state = CAN_STATE_ERROR_ACTIVE;
 
        return 0;
 }
@@ -672,15 +610,13 @@ static void c_can_stop(struct net_device *dev)
 {
        struct c_can_priv *priv = netdev_priv(dev);
 
-       /* disable all interrupts */
-       c_can_enable_all_interrupts(priv, DISABLE_ALL_INTERRUPTS);
-
-       /* set the state as STOPPED */
+       c_can_irq_control(priv, false);
        priv->can.state = CAN_STATE_STOPPED;
 }
 
 static int c_can_set_mode(struct net_device *dev, enum can_mode mode)
 {
+       struct c_can_priv *priv = netdev_priv(dev);
        int err;
 
        switch (mode) {
@@ -689,6 +625,7 @@ static int c_can_set_mode(struct net_device *dev, enum can_mode mode)
                if (err)
                        return err;
                netif_wake_queue(dev);
+               c_can_irq_control(priv, true);
                break;
        default:
                return -EOPNOTSUPP;
@@ -724,42 +661,29 @@ static int c_can_get_berr_counter(const struct net_device *dev,
        return err;
 }
 
-/*
- * priv->tx_echo holds the number of the oldest can_frame put for
- * transmission into the hardware, but not yet ACKed by the CAN tx
- * complete IRQ.
- *
- * We iterate from priv->tx_echo to priv->tx_next and check if the
- * packet has been transmitted, echo it back to the CAN framework.
- * If we discover a not yet transmitted packet, stop looking for more.
- */
 static void c_can_do_tx(struct net_device *dev)
 {
        struct c_can_priv *priv = netdev_priv(dev);
        struct net_device_stats *stats = &dev->stats;
-       u32 val, obj, pkts = 0, bytes = 0;
-
-       spin_lock_bh(&priv->xmit_lock);
+       u32 idx, obj, pkts = 0, bytes = 0, pend, clr;
 
-       for (; (priv->tx_next - priv->tx_echo) > 0; priv->tx_echo++) {
-               obj = get_tx_echo_msg_obj(priv->tx_echo);
-               val = c_can_read_reg32(priv, C_CAN_TXRQST1_REG);
+       clr = pend = priv->read_reg(priv, C_CAN_INTPND2_REG);
 
-               if (val & (1 << (obj - 1)))
-                       break;
-
-               can_get_echo_skb(dev, obj - C_CAN_MSG_OBJ_TX_FIRST);
-               bytes += priv->dlc[obj - C_CAN_MSG_OBJ_TX_FIRST];
+       while ((idx = ffs(pend))) {
+               idx--;
+               pend &= ~(1 << idx);
+               obj = idx + C_CAN_MSG_OBJ_TX_FIRST;
+               c_can_inval_tx_object(dev, IF_RX, obj);
+               can_get_echo_skb(dev, idx);
+               bytes += priv->dlc[idx];
                pkts++;
-               c_can_inval_msg_object(dev, IF_TX, obj);
        }
 
-       /* restart queue if wrap-up or if queue stalled on last pkt */
-       if (((priv->tx_next & C_CAN_NEXT_MSG_OBJ_MASK) != 0) ||
-                       ((priv->tx_echo & C_CAN_NEXT_MSG_OBJ_MASK) == 0))
-               netif_wake_queue(dev);
+       /* Clear the bits in the tx_active mask */
+       atomic_sub(clr, &priv->tx_active);
 
-       spin_unlock_bh(&priv->xmit_lock);
+       if (clr & (1 << (C_CAN_MSG_OBJ_TX_NUM - 1)))
+               netif_wake_queue(dev);
 
        if (pkts) {
                stats->tx_bytes += bytes;
@@ -800,18 +724,42 @@ static u32 c_can_adjust_pending(u32 pend)
        return pend & ~((1 << lasts) - 1);
 }
 
+static inline void c_can_rx_object_get(struct net_device *dev,
+                                      struct c_can_priv *priv, u32 obj)
+{
+#ifdef CONFIG_CAN_C_CAN_STRICT_FRAME_ORDERING
+       if (obj < C_CAN_MSG_RX_LOW_LAST)
+               c_can_object_get(dev, IF_RX, obj, IF_COMM_RCV_LOW);
+       else
+#endif
+               c_can_object_get(dev, IF_RX, obj, priv->comm_rcv_high);
+}
+
+static inline void c_can_rx_finalize(struct net_device *dev,
+                                    struct c_can_priv *priv, u32 obj)
+{
+#ifdef CONFIG_CAN_C_CAN_STRICT_FRAME_ORDERING
+       if (obj < C_CAN_MSG_RX_LOW_LAST)
+               priv->rxmasked |= BIT(obj - 1);
+       else if (obj == C_CAN_MSG_RX_LOW_LAST) {
+               priv->rxmasked = 0;
+               /* activate all lower message objects */
+               c_can_activate_all_lower_rx_msg_obj(dev, IF_RX);
+       }
+#endif
+       if (priv->type != BOSCH_D_CAN)
+               c_can_object_get(dev, IF_RX, obj, IF_COMM_CLR_NEWDAT);
+}
+
 static int c_can_read_objects(struct net_device *dev, struct c_can_priv *priv,
                              u32 pend, int quota)
 {
-       u32 pkts = 0, ctrl, obj, mcmd;
+       u32 pkts = 0, ctrl, obj;
 
        while ((obj = ffs(pend)) && quota > 0) {
                pend &= ~BIT(obj - 1);
 
-               mcmd = obj < C_CAN_MSG_RX_LOW_LAST ?
-                       IF_COMM_RCV_LOW : IF_COMM_RCV_HIGH;
-
-               c_can_object_get(dev, IF_RX, obj, mcmd);
+               c_can_rx_object_get(dev, priv, obj);
                ctrl = priv->read_reg(priv, C_CAN_IFACE(MSGCTRL_REG, IF_RX));
 
                if (ctrl & IF_MCONT_MSGLST) {
@@ -833,9 +781,7 @@ static int c_can_read_objects(struct net_device *dev, struct c_can_priv *priv,
                /* read the data from the message object */
                c_can_read_msg_object(dev, IF_RX, ctrl);
 
-               if (obj == C_CAN_MSG_RX_LOW_LAST)
-                       /* activate all lower message objects */
-                       c_can_activate_all_lower_rx_msg_obj(dev, IF_RX, ctrl);
+               c_can_rx_finalize(dev, priv, obj);
 
                pkts++;
                quota--;
@@ -844,6 +790,16 @@ static int c_can_read_objects(struct net_device *dev, struct c_can_priv *priv,
        return pkts;
 }
 
+static inline u32 c_can_get_pending(struct c_can_priv *priv)
+{
+       u32 pend = priv->read_reg(priv, C_CAN_NEWDAT1_REG);
+
+#ifdef CONFIG_CAN_C_CAN_STRICT_FRAME_ORDERING
+       pend &= ~priv->rxmasked;
+#endif
+       return pend;
+}
+
 /*
  * theory of operation:
  *
@@ -853,6 +809,8 @@ static int c_can_read_objects(struct net_device *dev, struct c_can_priv *priv,
  * has arrived. To work-around this issue, we keep two groups of message
  * objects whose partitioning is defined by C_CAN_MSG_OBJ_RX_SPLIT.
  *
+ * If CONFIG_CAN_C_CAN_STRICT_FRAME_ORDERING = y
+ *
  * To ensure in-order frame reception we use the following
  * approach while re-activating a message object to receive further
  * frames:
@@ -865,6 +823,14 @@ static int c_can_read_objects(struct net_device *dev, struct c_can_priv *priv,
  * - if the current message object number is greater than
  *   C_CAN_MSG_RX_LOW_LAST then clear the NEWDAT bit of
  *   only this message object.
+ *
+ * This can cause packet loss!
+ *
+ * If CONFIG_CAN_C_CAN_STRICT_FRAME_ORDERING = n
+ *
+ * We clear the newdat bit right away.
+ *
+ * This can result in packet reordering when the readout is slow.
  */
 static int c_can_do_rx_poll(struct net_device *dev, int quota)
 {
@@ -880,7 +846,7 @@ static int c_can_do_rx_poll(struct net_device *dev, int quota)
 
        while (quota > 0) {
                if (!pend) {
-                       pend = priv->read_reg(priv, C_CAN_INTPND1_REG);
+                       pend = c_can_get_pending(priv);
                        if (!pend)
                                break;
                        /*
@@ -905,12 +871,6 @@ static int c_can_do_rx_poll(struct net_device *dev, int quota)
        return pkts;
 }
 
-static inline int c_can_has_and_handle_berr(struct c_can_priv *priv)
-{
-       return (priv->can.ctrlmode & CAN_CTRLMODE_BERR_REPORTING) &&
-               (priv->current_status & LEC_UNUSED);
-}
-
 static int c_can_handle_state_change(struct net_device *dev,
                                enum c_can_bus_error_types error_type)
 {
@@ -922,6 +882,26 @@ static int c_can_handle_state_change(struct net_device *dev,
        struct sk_buff *skb;
        struct can_berr_counter bec;
 
+       switch (error_type) {
+       case C_CAN_ERROR_WARNING:
+               /* error warning state */
+               priv->can.can_stats.error_warning++;
+               priv->can.state = CAN_STATE_ERROR_WARNING;
+               break;
+       case C_CAN_ERROR_PASSIVE:
+               /* error passive state */
+               priv->can.can_stats.error_passive++;
+               priv->can.state = CAN_STATE_ERROR_PASSIVE;
+               break;
+       case C_CAN_BUS_OFF:
+               /* bus-off state */
+               priv->can.state = CAN_STATE_BUS_OFF;
+               can_bus_off(dev);
+               break;
+       default:
+               break;
+       }
+
        /* propagate the error condition to the CAN stack */
        skb = alloc_can_err_skb(dev, &cf);
        if (unlikely(!skb))
@@ -935,8 +915,6 @@ static int c_can_handle_state_change(struct net_device *dev,
        switch (error_type) {
        case C_CAN_ERROR_WARNING:
                /* error warning state */
-               priv->can.can_stats.error_warning++;
-               priv->can.state = CAN_STATE_ERROR_WARNING;
                cf->can_id |= CAN_ERR_CRTL;
                cf->data[1] = (bec.txerr > bec.rxerr) ?
                        CAN_ERR_CRTL_TX_WARNING :
@@ -947,8 +925,6 @@ static int c_can_handle_state_change(struct net_device *dev,
                break;
        case C_CAN_ERROR_PASSIVE:
                /* error passive state */
-               priv->can.can_stats.error_passive++;
-               priv->can.state = CAN_STATE_ERROR_PASSIVE;
                cf->can_id |= CAN_ERR_CRTL;
                if (rx_err_passive)
                        cf->data[1] |= CAN_ERR_CRTL_RX_PASSIVE;
@@ -960,22 +936,16 @@ static int c_can_handle_state_change(struct net_device *dev,
                break;
        case C_CAN_BUS_OFF:
                /* bus-off state */
-               priv->can.state = CAN_STATE_BUS_OFF;
                cf->can_id |= CAN_ERR_BUSOFF;
-               /*
-                * disable all interrupts in bus-off mode to ensure that
-                * the CPU is not hogged down
-                */
-               c_can_enable_all_interrupts(priv, DISABLE_ALL_INTERRUPTS);
                can_bus_off(dev);
                break;
        default:
                break;
        }
 
-       netif_receive_skb(skb);
        stats->rx_packets++;
        stats->rx_bytes += cf->can_dlc;
+       netif_receive_skb(skb);
 
        return 1;
 }
@@ -996,6 +966,13 @@ static int c_can_handle_bus_err(struct net_device *dev,
        if (lec_type == LEC_UNUSED || lec_type == LEC_NO_ERROR)
                return 0;
 
+       if (!(priv->can.ctrlmode & CAN_CTRLMODE_BERR_REPORTING))
+               return 0;
+
+       /* common for all type of bus errors */
+       priv->can.can_stats.bus_error++;
+       stats->rx_errors++;
+
        /* propagate the error condition to the CAN stack */
        skb = alloc_can_err_skb(dev, &cf);
        if (unlikely(!skb))
@@ -1005,10 +982,6 @@ static int c_can_handle_bus_err(struct net_device *dev,
         * check for 'last error code' which tells us the
         * type of the last error to occur on the CAN bus
         */
-
-       /* common for all type of bus errors */
-       priv->can.can_stats.bus_error++;
-       stats->rx_errors++;
        cf->can_id |= CAN_ERR_PROT | CAN_ERR_BUSERROR;
        cf->data[2] |= CAN_ERR_PROT_UNSPEC;
 
@@ -1043,95 +1016,64 @@ static int c_can_handle_bus_err(struct net_device *dev,
                break;
        }
 
-       /* set a `lec` value so that we can check for updates later */
-       priv->write_reg(priv, C_CAN_STS_REG, LEC_UNUSED);
-
-       netif_receive_skb(skb);
        stats->rx_packets++;
        stats->rx_bytes += cf->can_dlc;
-
+       netif_receive_skb(skb);
        return 1;
 }
 
 static int c_can_poll(struct napi_struct *napi, int quota)
 {
-       u16 irqstatus;
-       int lec_type = 0;
-       int work_done = 0;
        struct net_device *dev = napi->dev;
        struct c_can_priv *priv = netdev_priv(dev);
+       u16 curr, last = priv->last_status;
+       int work_done = 0;
 
-       irqstatus = priv->irqstatus;
-       if (!irqstatus)
-               goto end;
+       priv->last_status = curr = priv->read_reg(priv, C_CAN_STS_REG);
+       /* Ack status on C_CAN. D_CAN is self clearing */
+       if (priv->type != BOSCH_D_CAN)
+               priv->write_reg(priv, C_CAN_STS_REG, LEC_UNUSED);
 
-       /* status events have the highest priority */
-       if (irqstatus == STATUS_INTERRUPT) {
-               priv->current_status = priv->read_reg(priv,
-                                       C_CAN_STS_REG);
-
-               /* handle Tx/Rx events */
-               if (priv->current_status & STATUS_TXOK)
-                       priv->write_reg(priv, C_CAN_STS_REG,
-                                       priv->current_status & ~STATUS_TXOK);
-
-               if (priv->current_status & STATUS_RXOK)
-                       priv->write_reg(priv, C_CAN_STS_REG,
-                                       priv->current_status & ~STATUS_RXOK);
-
-               /* handle state changes */
-               if ((priv->current_status & STATUS_EWARN) &&
-                               (!(priv->last_status & STATUS_EWARN))) {
-                       netdev_dbg(dev, "entered error warning state\n");
-                       work_done += c_can_handle_state_change(dev,
-                                               C_CAN_ERROR_WARNING);
-               }
-               if ((priv->current_status & STATUS_EPASS) &&
-                               (!(priv->last_status & STATUS_EPASS))) {
-                       netdev_dbg(dev, "entered error passive state\n");
-                       work_done += c_can_handle_state_change(dev,
-                                               C_CAN_ERROR_PASSIVE);
-               }
-               if ((priv->current_status & STATUS_BOFF) &&
-                               (!(priv->last_status & STATUS_BOFF))) {
-                       netdev_dbg(dev, "entered bus off state\n");
-                       work_done += c_can_handle_state_change(dev,
-                                               C_CAN_BUS_OFF);
-               }
+       /* handle state changes */
+       if ((curr & STATUS_EWARN) && (!(last & STATUS_EWARN))) {
+               netdev_dbg(dev, "entered error warning state\n");
+               work_done += c_can_handle_state_change(dev, C_CAN_ERROR_WARNING);
+       }
 
-               /* handle bus recovery events */
-               if ((!(priv->current_status & STATUS_BOFF)) &&
-                               (priv->last_status & STATUS_BOFF)) {
-                       netdev_dbg(dev, "left bus off state\n");
-                       priv->can.state = CAN_STATE_ERROR_ACTIVE;
-               }
-               if ((!(priv->current_status & STATUS_EPASS)) &&
-                               (priv->last_status & STATUS_EPASS)) {
-                       netdev_dbg(dev, "left error passive state\n");
-                       priv->can.state = CAN_STATE_ERROR_ACTIVE;
-               }
+       if ((curr & STATUS_EPASS) && (!(last & STATUS_EPASS))) {
+               netdev_dbg(dev, "entered error passive state\n");
+               work_done += c_can_handle_state_change(dev, C_CAN_ERROR_PASSIVE);
+       }
+
+       if ((curr & STATUS_BOFF) && (!(last & STATUS_BOFF))) {
+               netdev_dbg(dev, "entered bus off state\n");
+               work_done += c_can_handle_state_change(dev, C_CAN_BUS_OFF);
+               goto end;
+       }
 
-               priv->last_status = priv->current_status;
-
-               /* handle lec errors on the bus */
-               lec_type = c_can_has_and_handle_berr(priv);
-               if (lec_type)
-                       work_done += c_can_handle_bus_err(dev, lec_type);
-       } else if ((irqstatus >= C_CAN_MSG_OBJ_RX_FIRST) &&
-                       (irqstatus <= C_CAN_MSG_OBJ_RX_LAST)) {
-               /* handle events corresponding to receive message objects */
-               work_done += c_can_do_rx_poll(dev, (quota - work_done));
-       } else if ((irqstatus >= C_CAN_MSG_OBJ_TX_FIRST) &&
-                       (irqstatus <= C_CAN_MSG_OBJ_TX_LAST)) {
-               /* handle events corresponding to transmit message objects */
-               c_can_do_tx(dev);
+       /* handle bus recovery events */
+       if ((!(curr & STATUS_BOFF)) && (last & STATUS_BOFF)) {
+               netdev_dbg(dev, "left bus off state\n");
+               priv->can.state = CAN_STATE_ERROR_ACTIVE;
+       }
+       if ((!(curr & STATUS_EPASS)) && (last & STATUS_EPASS)) {
+               netdev_dbg(dev, "left error passive state\n");
+               priv->can.state = CAN_STATE_ERROR_ACTIVE;
        }
 
+       /* handle lec errors on the bus */
+       work_done += c_can_handle_bus_err(dev, curr & LEC_MASK);
+
+       /* Handle Tx/Rx events. We do this unconditionally */
+       work_done += c_can_do_rx_poll(dev, (quota - work_done));
+       c_can_do_tx(dev);
+
 end:
        if (work_done < quota) {
                napi_complete(napi);
-               /* enable all IRQs */
-               c_can_enable_all_interrupts(priv, ENABLE_ALL_INTERRUPTS);
+               /* enable all IRQs if we are not in bus off state */
+               if (priv->can.state != CAN_STATE_BUS_OFF)
+                       c_can_irq_control(priv, true);
        }
 
        return work_done;
@@ -1142,12 +1084,11 @@ static irqreturn_t c_can_isr(int irq, void *dev_id)
        struct net_device *dev = (struct net_device *)dev_id;
        struct c_can_priv *priv = netdev_priv(dev);
 
-       priv->irqstatus = priv->read_reg(priv, C_CAN_INT_REG);
-       if (!priv->irqstatus)
+       if (!priv->read_reg(priv, C_CAN_INT_REG))
                return IRQ_NONE;
 
        /* disable all interrupts and schedule the NAPI */
-       c_can_enable_all_interrupts(priv, DISABLE_ALL_INTERRUPTS);
+       c_can_irq_control(priv, false);
        napi_schedule(&priv->napi);
 
        return IRQ_HANDLED;
@@ -1184,6 +1125,8 @@ static int c_can_open(struct net_device *dev)
        can_led_event(dev, CAN_LED_EVENT_OPEN);
 
        napi_enable(&priv->napi);
+       /* enable status change, error and module interrupts */
+       c_can_irq_control(priv, true);
        netif_start_queue(dev);
 
        return 0;
@@ -1226,7 +1169,6 @@ struct net_device *alloc_c_can_dev(void)
                return NULL;
 
        priv = netdev_priv(dev);
-       spin_lock_init(&priv->xmit_lock);
        netif_napi_add(dev, &priv->napi, c_can_poll, C_CAN_NAPI_WEIGHT);
 
        priv->dev = dev;
@@ -1281,6 +1223,7 @@ int c_can_power_up(struct net_device *dev)
        u32 val;
        unsigned long time_out;
        struct c_can_priv *priv = netdev_priv(dev);
+       int ret;
 
        if (!(dev->flags & IFF_UP))
                return 0;
@@ -1307,7 +1250,11 @@ int c_can_power_up(struct net_device *dev)
        if (time_after(jiffies, time_out))
                return -ETIMEDOUT;
 
-       return c_can_start(dev);
+       ret = c_can_start(dev);
+       if (!ret)
+               c_can_irq_control(priv, true);
+
+       return ret;
 }
 EXPORT_SYMBOL_GPL(c_can_power_up);
 #endif
index faa8404162b397e4bd589c0b7b665c2f6ab1e4e4..99ad1aa576b045197f82780d64936f3b7fe5651a 100644 (file)
 #ifndef C_CAN_H
 #define C_CAN_H
 
-/*
- * IFx register masks:
- * allow easy operation on 16-bit registers when the
- * argument is 32-bit instead
- */
-#define IFX_WRITE_LOW_16BIT(x) ((x) & 0xFFFF)
-#define IFX_WRITE_HIGH_16BIT(x)        (((x) & 0xFFFF0000) >> 16)
-
 /* message object split */
 #define C_CAN_NO_OF_OBJECTS    32
 #define C_CAN_MSG_OBJ_RX_NUM   16
@@ -45,8 +37,6 @@
 
 #define C_CAN_MSG_OBJ_RX_SPLIT 9
 #define C_CAN_MSG_RX_LOW_LAST  (C_CAN_MSG_OBJ_RX_SPLIT - 1)
-
-#define C_CAN_NEXT_MSG_OBJ_MASK        (C_CAN_MSG_OBJ_TX_NUM - 1)
 #define RECEIVE_OBJECT_BITS    0x0000ffff
 
 enum reg {
@@ -88,6 +78,7 @@ enum reg {
        C_CAN_INTPND2_REG,
        C_CAN_MSGVAL1_REG,
        C_CAN_MSGVAL2_REG,
+       C_CAN_FUNCTION_REG,
 };
 
 static const u16 reg_map_c_can[] = {
@@ -139,6 +130,7 @@ static const u16 reg_map_d_can[] = {
        [C_CAN_BRPEXT_REG]      = 0x0E,
        [C_CAN_INT_REG]         = 0x10,
        [C_CAN_TEST_REG]        = 0x14,
+       [C_CAN_FUNCTION_REG]    = 0x18,
        [C_CAN_TXRQST1_REG]     = 0x88,
        [C_CAN_TXRQST2_REG]     = 0x8A,
        [C_CAN_NEWDAT1_REG]     = 0x9C,
@@ -183,23 +175,22 @@ struct c_can_priv {
        struct napi_struct napi;
        struct net_device *dev;
        struct device *device;
-       spinlock_t xmit_lock;
-       int tx_object;
-       int current_status;
+       atomic_t tx_active;
+       unsigned long tx_dir;
        int last_status;
-       u16 (*read_reg) (struct c_can_priv *priv, enum reg index);
-       void (*write_reg) (struct c_can_priv *priv, enum reg index, u16 val);
+       u16 (*read_reg) (const struct c_can_priv *priv, enum reg index);
+       void (*write_reg) (const struct c_can_priv *priv, enum reg index, u16 val);
+       u32 (*read_reg32) (const struct c_can_priv *priv, enum reg index);
+       void (*write_reg32) (const struct c_can_priv *priv, enum reg index, u32 val);
        void __iomem *base;
        const u16 *regs;
-       unsigned long irq_flags; /* for request_irq() */
-       unsigned int tx_next;
-       unsigned int tx_echo;
        void *priv;             /* for board-specific data */
-       u16 irqstatus;
        enum c_can_dev_id type;
        u32 __iomem *raminit_ctrlreg;
-       unsigned int instance;
+       int instance;
        void (*raminit) (const struct c_can_priv *priv, bool enable);
+       u32 comm_rcv_high;
+       u32 rxmasked;
        u32 dlc[C_CAN_MSG_OBJ_TX_NUM];
 };
 
index bce0be54c2f59587a2498d2f37821f2634b886d9..5d11e0e4225bf3c84442b9ec8ddea4a005b4717f 100644 (file)
 
 #include "c_can.h"
 
+#define PCI_DEVICE_ID_PCH_CAN  0x8818
+#define PCH_PCI_SOFT_RESET     0x01fc
+
 enum c_can_pci_reg_align {
        C_CAN_REG_ALIGN_16,
        C_CAN_REG_ALIGN_32,
+       C_CAN_REG_32,
 };
 
 struct c_can_pci_data {
@@ -31,6 +35,10 @@ struct c_can_pci_data {
        enum c_can_pci_reg_align reg_align;
        /* Set the frequency */
        unsigned int freq;
+       /* PCI bar number */
+       int bar;
+       /* Callback for reset */
+       void (*init)(const struct c_can_priv *priv, bool enable);
 };
 
 /*
@@ -39,30 +47,70 @@ struct c_can_pci_data {
  * registers can be aligned to a 16-bit boundary or 32-bit boundary etc.
  * Handle the same by providing a common read/write interface.
  */
-static u16 c_can_pci_read_reg_aligned_to_16bit(struct c_can_priv *priv,
+static u16 c_can_pci_read_reg_aligned_to_16bit(const struct c_can_priv *priv,
                                                enum reg index)
 {
        return readw(priv->base + priv->regs[index]);
 }
 
-static void c_can_pci_write_reg_aligned_to_16bit(struct c_can_priv *priv,
+static void c_can_pci_write_reg_aligned_to_16bit(const struct c_can_priv *priv,
                                                enum reg index, u16 val)
 {
        writew(val, priv->base + priv->regs[index]);
 }
 
-static u16 c_can_pci_read_reg_aligned_to_32bit(struct c_can_priv *priv,
+static u16 c_can_pci_read_reg_aligned_to_32bit(const struct c_can_priv *priv,
                                                enum reg index)
 {
        return readw(priv->base + 2 * priv->regs[index]);
 }
 
-static void c_can_pci_write_reg_aligned_to_32bit(struct c_can_priv *priv,
+static void c_can_pci_write_reg_aligned_to_32bit(const struct c_can_priv *priv,
                                                enum reg index, u16 val)
 {
        writew(val, priv->base + 2 * priv->regs[index]);
 }
 
+static u16 c_can_pci_read_reg_32bit(const struct c_can_priv *priv,
+                                   enum reg index)
+{
+       return (u16)ioread32(priv->base + 2 * priv->regs[index]);
+}
+
+static void c_can_pci_write_reg_32bit(const struct c_can_priv *priv,
+                                     enum reg index, u16 val)
+{
+       iowrite32((u32)val, priv->base + 2 * priv->regs[index]);
+}
+
+static u32 c_can_pci_read_reg32(const struct c_can_priv *priv, enum reg index)
+{
+       u32 val;
+
+       val = priv->read_reg(priv, index);
+       val |= ((u32) priv->read_reg(priv, index + 1)) << 16;
+
+       return val;
+}
+
+static void c_can_pci_write_reg32(const struct c_can_priv *priv, enum reg index,
+               u32 val)
+{
+       priv->write_reg(priv, index + 1, val >> 16);
+       priv->write_reg(priv, index, val);
+}
+
+static void c_can_pci_reset_pch(const struct c_can_priv *priv, bool enable)
+{
+       if (enable) {
+               u32 __iomem *addr = priv->base + PCH_PCI_SOFT_RESET;
+
+               /* write to sw reset register */
+               iowrite32(1, addr);
+               iowrite32(0, addr);
+       }
+}
+
 static int c_can_pci_probe(struct pci_dev *pdev,
                           const struct pci_device_id *ent)
 {
@@ -84,10 +132,14 @@ static int c_can_pci_probe(struct pci_dev *pdev,
                goto out_disable_device;
        }
 
-       pci_set_master(pdev);
-       pci_enable_msi(pdev);
+       ret = pci_enable_msi(pdev);
+       if (!ret) {
+               dev_info(&pdev->dev, "MSI enabled\n");
+               pci_set_master(pdev);
+       }
 
-       addr = pci_iomap(pdev, 0, pci_resource_len(pdev, 0));
+       addr = pci_iomap(pdev, c_can_pci_data->bar,
+                        pci_resource_len(pdev, c_can_pci_data->bar));
        if (!addr) {
                dev_err(&pdev->dev,
                        "device has no PCI memory resources, "
@@ -132,6 +184,8 @@ static int c_can_pci_probe(struct pci_dev *pdev,
                goto out_free_c_can;
        }
 
+       priv->type = c_can_pci_data->type;
+
        /* Configure access to registers */
        switch (c_can_pci_data->reg_align) {
        case C_CAN_REG_ALIGN_32:
@@ -142,10 +196,18 @@ static int c_can_pci_probe(struct pci_dev *pdev,
                priv->read_reg = c_can_pci_read_reg_aligned_to_16bit;
                priv->write_reg = c_can_pci_write_reg_aligned_to_16bit;
                break;
+       case C_CAN_REG_32:
+               priv->read_reg = c_can_pci_read_reg_32bit;
+               priv->write_reg = c_can_pci_write_reg_32bit;
+               break;
        default:
                ret = -EINVAL;
                goto out_free_c_can;
        }
+       priv->read_reg32 = c_can_pci_read_reg32;
+       priv->write_reg32 = c_can_pci_write_reg32;
+
+       priv->raminit = c_can_pci_data->init;
 
        ret = register_c_can_dev(dev);
        if (ret) {
@@ -193,6 +255,15 @@ static struct c_can_pci_data c_can_sta2x11= {
        .type = BOSCH_C_CAN,
        .reg_align = C_CAN_REG_ALIGN_32,
        .freq = 52000000, /* 52 Mhz */
+       .bar = 0,
+};
+
+static struct c_can_pci_data c_can_pch = {
+       .type = BOSCH_C_CAN,
+       .reg_align = C_CAN_REG_32,
+       .freq = 50000000, /* 50 MHz */
+       .init = c_can_pci_reset_pch,
+       .bar = 1,
 };
 
 #define C_CAN_ID(_vend, _dev, _driverdata) {           \
@@ -202,6 +273,8 @@ static struct c_can_pci_data c_can_sta2x11= {
 static DEFINE_PCI_DEVICE_TABLE(c_can_pci_tbl) = {
        C_CAN_ID(PCI_VENDOR_ID_STMICRO, PCI_DEVICE_ID_STMICRO_CAN,
                 c_can_sta2x11),
+       C_CAN_ID(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_PCH_CAN,
+                c_can_pch),
        {},
 };
 static struct pci_driver c_can_pci_driver = {
index 806d92753427b619fe7241ac290a18aa7769240b..824108cd9fd594a91c25b0b4a1d43d3341ad9a31 100644 (file)
@@ -40,6 +40,7 @@
 #define CAN_RAMINIT_START_MASK(i)      (0x001 << (i))
 #define CAN_RAMINIT_DONE_MASK(i)       (0x100 << (i))
 #define CAN_RAMINIT_ALL_MASK(i)                (0x101 << (i))
+#define DCAN_RAM_INIT_BIT              (1 << 3)
 static DEFINE_SPINLOCK(raminit_lock);
 /*
  * 16-bit c_can registers can be arranged differently in the memory
@@ -47,31 +48,31 @@ static DEFINE_SPINLOCK(raminit_lock);
  * registers can be aligned to a 16-bit boundary or 32-bit boundary etc.
  * Handle the same by providing a common read/write interface.
  */
-static u16 c_can_plat_read_reg_aligned_to_16bit(struct c_can_priv *priv,
+static u16 c_can_plat_read_reg_aligned_to_16bit(const struct c_can_priv *priv,
                                                enum reg index)
 {
        return readw(priv->base + priv->regs[index]);
 }
 
-static void c_can_plat_write_reg_aligned_to_16bit(struct c_can_priv *priv,
+static void c_can_plat_write_reg_aligned_to_16bit(const struct c_can_priv *priv,
                                                enum reg index, u16 val)
 {
        writew(val, priv->base + priv->regs[index]);
 }
 
-static u16 c_can_plat_read_reg_aligned_to_32bit(struct c_can_priv *priv,
+static u16 c_can_plat_read_reg_aligned_to_32bit(const struct c_can_priv *priv,
                                                enum reg index)
 {
        return readw(priv->base + 2 * priv->regs[index]);
 }
 
-static void c_can_plat_write_reg_aligned_to_32bit(struct c_can_priv *priv,
+static void c_can_plat_write_reg_aligned_to_32bit(const struct c_can_priv *priv,
                                                enum reg index, u16 val)
 {
        writew(val, priv->base + 2 * priv->regs[index]);
 }
 
-static void c_can_hw_raminit_wait(const struct c_can_priv *priv, u32 mask,
+static void c_can_hw_raminit_wait_ti(const struct c_can_priv *priv, u32 mask,
                                  u32 val)
 {
        /* We look only at the bits of our instance. */
@@ -80,7 +81,7 @@ static void c_can_hw_raminit_wait(const struct c_can_priv *priv, u32 mask,
                udelay(1);
 }
 
-static void c_can_hw_raminit(const struct c_can_priv *priv, bool enable)
+static void c_can_hw_raminit_ti(const struct c_can_priv *priv, bool enable)
 {
        u32 mask = CAN_RAMINIT_ALL_MASK(priv->instance);
        u32 ctrl;
@@ -96,18 +97,68 @@ static void c_can_hw_raminit(const struct c_can_priv *priv, bool enable)
        ctrl |= CAN_RAMINIT_DONE_MASK(priv->instance);
        writel(ctrl, priv->raminit_ctrlreg);
        ctrl &= ~CAN_RAMINIT_DONE_MASK(priv->instance);
-       c_can_hw_raminit_wait(priv, ctrl, mask);
+       c_can_hw_raminit_wait_ti(priv, ctrl, mask);
 
        if (enable) {
                /* Set start bit and wait for the done bit. */
                ctrl |= CAN_RAMINIT_START_MASK(priv->instance);
                writel(ctrl, priv->raminit_ctrlreg);
                ctrl |= CAN_RAMINIT_DONE_MASK(priv->instance);
-               c_can_hw_raminit_wait(priv, ctrl, mask);
+               c_can_hw_raminit_wait_ti(priv, ctrl, mask);
        }
        spin_unlock(&raminit_lock);
 }
 
+static u32 c_can_plat_read_reg32(const struct c_can_priv *priv, enum reg index)
+{
+       u32 val;
+
+       val = priv->read_reg(priv, index);
+       val |= ((u32) priv->read_reg(priv, index + 1)) << 16;
+
+       return val;
+}
+
+static void c_can_plat_write_reg32(const struct c_can_priv *priv, enum reg index,
+               u32 val)
+{
+       priv->write_reg(priv, index + 1, val >> 16);
+       priv->write_reg(priv, index, val);
+}
+
+static u32 d_can_plat_read_reg32(const struct c_can_priv *priv, enum reg index)
+{
+       return readl(priv->base + priv->regs[index]);
+}
+
+static void d_can_plat_write_reg32(const struct c_can_priv *priv, enum reg index,
+               u32 val)
+{
+       writel(val, priv->base + priv->regs[index]);
+}
+
+static void c_can_hw_raminit_wait(const struct c_can_priv *priv, u32 mask)
+{
+       while (priv->read_reg32(priv, C_CAN_FUNCTION_REG) & mask)
+               udelay(1);
+}
+
+static void c_can_hw_raminit(const struct c_can_priv *priv, bool enable)
+{
+       u32 ctrl;
+
+       ctrl = priv->read_reg32(priv, C_CAN_FUNCTION_REG);
+       ctrl &= ~DCAN_RAM_INIT_BIT;
+       priv->write_reg32(priv, C_CAN_FUNCTION_REG, ctrl);
+       c_can_hw_raminit_wait(priv, ctrl);
+
+       if (enable) {
+               ctrl |= DCAN_RAM_INIT_BIT;
+               priv->write_reg32(priv, C_CAN_FUNCTION_REG, ctrl);
+               c_can_hw_raminit_wait(priv, ctrl);
+       }
+}
+
 static struct platform_device_id c_can_id_table[] = {
        [BOSCH_C_CAN_PLATFORM] = {
                .name = KBUILD_MODNAME,
@@ -201,11 +252,15 @@ static int c_can_plat_probe(struct platform_device *pdev)
                case IORESOURCE_MEM_32BIT:
                        priv->read_reg = c_can_plat_read_reg_aligned_to_32bit;
                        priv->write_reg = c_can_plat_write_reg_aligned_to_32bit;
+                       priv->read_reg32 = c_can_plat_read_reg32;
+                       priv->write_reg32 = c_can_plat_write_reg32;
                        break;
                case IORESOURCE_MEM_16BIT:
                default:
                        priv->read_reg = c_can_plat_read_reg_aligned_to_16bit;
                        priv->write_reg = c_can_plat_write_reg_aligned_to_16bit;
+                       priv->read_reg32 = c_can_plat_read_reg32;
+                       priv->write_reg32 = c_can_plat_write_reg32;
                        break;
                }
                break;
@@ -214,6 +269,8 @@ static int c_can_plat_probe(struct platform_device *pdev)
                priv->can.ctrlmode_supported |= CAN_CTRLMODE_3_SAMPLES;
                priv->read_reg = c_can_plat_read_reg_aligned_to_16bit;
                priv->write_reg = c_can_plat_write_reg_aligned_to_16bit;
+               priv->read_reg32 = d_can_plat_read_reg32;
+               priv->write_reg32 = d_can_plat_write_reg32;
 
                if (pdev->dev.of_node)
                        priv->instance = of_alias_get_id(pdev->dev.of_node, "d_can");
@@ -221,11 +278,20 @@ static int c_can_plat_probe(struct platform_device *pdev)
                        priv->instance = pdev->id;
 
                res = platform_get_resource(pdev, IORESOURCE_MEM, 1);
+               /* Not all D_CAN modules have a separate register for the D_CAN
+                * RAM initialization. Use default RAM init bit in D_CAN module
+                * if not specified in DT.
+                */
+               if (!res) {
+                       priv->raminit = c_can_hw_raminit;
+                       break;
+               }
+
                priv->raminit_ctrlreg = devm_ioremap_resource(&pdev->dev, res);
-               if (IS_ERR(priv->raminit_ctrlreg) || (int)priv->instance < 0)
+               if (IS_ERR(priv->raminit_ctrlreg) || priv->instance < 0)
                        dev_info(&pdev->dev, "control memory is not used for raminit\n");
                else
-                       priv->raminit = c_can_hw_raminit;
+                       priv->raminit = c_can_hw_raminit_ti;
                break;
        default:
                ret = -EINVAL;
index c7a260478749ad163ec133df88e7a0086b220a73..e318e87e2bfc00ba9e32aa08858de5f5c1629dcf 100644 (file)
@@ -256,7 +256,7 @@ static int can_get_bittiming(struct net_device *dev, struct can_bittiming *bt,
 
        /* Check if the CAN device has bit-timing parameters */
        if (!btc)
-               return -ENOTSUPP;
+               return -EOPNOTSUPP;
 
        /*
         * Depending on the given can_bittiming parameter structure the CAN
diff --git a/drivers/net/can/mcp251x.c b/drivers/net/can/mcp251x.c
deleted file mode 100644 (file)
index 28c11f8..0000000
+++ /dev/null
@@ -1,1269 +0,0 @@
-/*
- * CAN bus driver for Microchip 251x CAN Controller with SPI Interface
- *
- * MCP2510 support and bug fixes by Christian Pellegrin
- * <chripell@evolware.org>
- *
- * Copyright 2009 Christian Pellegrin EVOL S.r.l.
- *
- * Copyright 2007 Raymarine UK, Ltd. All Rights Reserved.
- * Written under contract by:
- *   Chris Elston, Katalix Systems, Ltd.
- *
- * Based on Microchip MCP251x CAN controller driver written by
- * David Vrabel, Copyright 2006 Arcom Control Systems Ltd.
- *
- * Based on CAN bus driver for the CCAN controller written by
- * - Sascha Hauer, Marc Kleine-Budde, Pengutronix
- * - Simon Kallweit, intefo AG
- * Copyright 2007
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the version 2 of the GNU General Public License
- * as published by the Free Software Foundation
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, see <http://www.gnu.org/licenses/>.
- *
- *
- *
- * Your platform definition file should specify something like:
- *
- * static struct mcp251x_platform_data mcp251x_info = {
- *         .oscillator_frequency = 8000000,
- * };
- *
- * static struct spi_board_info spi_board_info[] = {
- *         {
- *                 .modalias = "mcp2510",
- *                     // or "mcp2515" depending on your controller
- *                 .platform_data = &mcp251x_info,
- *                 .irq = IRQ_EINT13,
- *                 .max_speed_hz = 2*1000*1000,
- *                 .chip_select = 2,
- *         },
- * };
- *
- * Please see mcp251x.h for a description of the fields in
- * struct mcp251x_platform_data.
- *
- */
-
-#include <linux/can/core.h>
-#include <linux/can/dev.h>
-#include <linux/can/led.h>
-#include <linux/can/platform/mcp251x.h>
-#include <linux/clk.h>
-#include <linux/completion.h>
-#include <linux/delay.h>
-#include <linux/device.h>
-#include <linux/dma-mapping.h>
-#include <linux/freezer.h>
-#include <linux/interrupt.h>
-#include <linux/io.h>
-#include <linux/kernel.h>
-#include <linux/module.h>
-#include <linux/netdevice.h>
-#include <linux/of.h>
-#include <linux/of_device.h>
-#include <linux/platform_device.h>
-#include <linux/slab.h>
-#include <linux/spi/spi.h>
-#include <linux/uaccess.h>
-#include <linux/regulator/consumer.h>
-
-/* SPI interface instruction set */
-#define INSTRUCTION_WRITE      0x02
-#define INSTRUCTION_READ       0x03
-#define INSTRUCTION_BIT_MODIFY 0x05
-#define INSTRUCTION_LOAD_TXB(n)        (0x40 + 2 * (n))
-#define INSTRUCTION_READ_RXB(n)        (((n) == 0) ? 0x90 : 0x94)
-#define INSTRUCTION_RESET      0xC0
-#define RTS_TXB0               0x01
-#define RTS_TXB1               0x02
-#define RTS_TXB2               0x04
-#define INSTRUCTION_RTS(n)     (0x80 | ((n) & 0x07))
-
-
-/* MPC251x registers */
-#define CANSTAT              0x0e
-#define CANCTRL              0x0f
-#  define CANCTRL_REQOP_MASK       0xe0
-#  define CANCTRL_REQOP_CONF       0x80
-#  define CANCTRL_REQOP_LISTEN_ONLY 0x60
-#  define CANCTRL_REQOP_LOOPBACK    0x40
-#  define CANCTRL_REQOP_SLEEP      0x20
-#  define CANCTRL_REQOP_NORMAL     0x00
-#  define CANCTRL_OSM              0x08
-#  define CANCTRL_ABAT             0x10
-#define TEC          0x1c
-#define REC          0x1d
-#define CNF1         0x2a
-#  define CNF1_SJW_SHIFT   6
-#define CNF2         0x29
-#  define CNF2_BTLMODE    0x80
-#  define CNF2_SAM         0x40
-#  define CNF2_PS1_SHIFT   3
-#define CNF3         0x28
-#  define CNF3_SOF        0x08
-#  define CNF3_WAKFIL     0x04
-#  define CNF3_PHSEG2_MASK 0x07
-#define CANINTE              0x2b
-#  define CANINTE_MERRE 0x80
-#  define CANINTE_WAKIE 0x40
-#  define CANINTE_ERRIE 0x20
-#  define CANINTE_TX2IE 0x10
-#  define CANINTE_TX1IE 0x08
-#  define CANINTE_TX0IE 0x04
-#  define CANINTE_RX1IE 0x02
-#  define CANINTE_RX0IE 0x01
-#define CANINTF              0x2c
-#  define CANINTF_MERRF 0x80
-#  define CANINTF_WAKIF 0x40
-#  define CANINTF_ERRIF 0x20
-#  define CANINTF_TX2IF 0x10
-#  define CANINTF_TX1IF 0x08
-#  define CANINTF_TX0IF 0x04
-#  define CANINTF_RX1IF 0x02
-#  define CANINTF_RX0IF 0x01
-#  define CANINTF_RX (CANINTF_RX0IF | CANINTF_RX1IF)
-#  define CANINTF_TX (CANINTF_TX2IF | CANINTF_TX1IF | CANINTF_TX0IF)
-#  define CANINTF_ERR (CANINTF_ERRIF)
-#define EFLG         0x2d
-#  define EFLG_EWARN   0x01
-#  define EFLG_RXWAR   0x02
-#  define EFLG_TXWAR   0x04
-#  define EFLG_RXEP    0x08
-#  define EFLG_TXEP    0x10
-#  define EFLG_TXBO    0x20
-#  define EFLG_RX0OVR  0x40
-#  define EFLG_RX1OVR  0x80
-#define TXBCTRL(n)  (((n) * 0x10) + 0x30 + TXBCTRL_OFF)
-#  define TXBCTRL_ABTF 0x40
-#  define TXBCTRL_MLOA 0x20
-#  define TXBCTRL_TXERR 0x10
-#  define TXBCTRL_TXREQ 0x08
-#define TXBSIDH(n)  (((n) * 0x10) + 0x30 + TXBSIDH_OFF)
-#  define SIDH_SHIFT    3
-#define TXBSIDL(n)  (((n) * 0x10) + 0x30 + TXBSIDL_OFF)
-#  define SIDL_SID_MASK    7
-#  define SIDL_SID_SHIFT   5
-#  define SIDL_EXIDE_SHIFT 3
-#  define SIDL_EID_SHIFT   16
-#  define SIDL_EID_MASK    3
-#define TXBEID8(n)  (((n) * 0x10) + 0x30 + TXBEID8_OFF)
-#define TXBEID0(n)  (((n) * 0x10) + 0x30 + TXBEID0_OFF)
-#define TXBDLC(n)   (((n) * 0x10) + 0x30 + TXBDLC_OFF)
-#  define DLC_RTR_SHIFT    6
-#define TXBCTRL_OFF 0
-#define TXBSIDH_OFF 1
-#define TXBSIDL_OFF 2
-#define TXBEID8_OFF 3
-#define TXBEID0_OFF 4
-#define TXBDLC_OFF  5
-#define TXBDAT_OFF  6
-#define RXBCTRL(n)  (((n) * 0x10) + 0x60 + RXBCTRL_OFF)
-#  define RXBCTRL_BUKT 0x04
-#  define RXBCTRL_RXM0 0x20
-#  define RXBCTRL_RXM1 0x40
-#define RXBSIDH(n)  (((n) * 0x10) + 0x60 + RXBSIDH_OFF)
-#  define RXBSIDH_SHIFT 3
-#define RXBSIDL(n)  (((n) * 0x10) + 0x60 + RXBSIDL_OFF)
-#  define RXBSIDL_IDE   0x08
-#  define RXBSIDL_SRR   0x10
-#  define RXBSIDL_EID   3
-#  define RXBSIDL_SHIFT 5
-#define RXBEID8(n)  (((n) * 0x10) + 0x60 + RXBEID8_OFF)
-#define RXBEID0(n)  (((n) * 0x10) + 0x60 + RXBEID0_OFF)
-#define RXBDLC(n)   (((n) * 0x10) + 0x60 + RXBDLC_OFF)
-#  define RXBDLC_LEN_MASK  0x0f
-#  define RXBDLC_RTR       0x40
-#define RXBCTRL_OFF 0
-#define RXBSIDH_OFF 1
-#define RXBSIDL_OFF 2
-#define RXBEID8_OFF 3
-#define RXBEID0_OFF 4
-#define RXBDLC_OFF  5
-#define RXBDAT_OFF  6
-#define RXFSIDH(n) ((n) * 4)
-#define RXFSIDL(n) ((n) * 4 + 1)
-#define RXFEID8(n) ((n) * 4 + 2)
-#define RXFEID0(n) ((n) * 4 + 3)
-#define RXMSIDH(n) ((n) * 4 + 0x20)
-#define RXMSIDL(n) ((n) * 4 + 0x21)
-#define RXMEID8(n) ((n) * 4 + 0x22)
-#define RXMEID0(n) ((n) * 4 + 0x23)
-
-#define GET_BYTE(val, byte)                    \
-       (((val) >> ((byte) * 8)) & 0xff)
-#define SET_BYTE(val, byte)                    \
-       (((val) & 0xff) << ((byte) * 8))
-
-/*
- * Buffer size required for the largest SPI transfer (i.e., reading a
- * frame)
- */
-#define CAN_FRAME_MAX_DATA_LEN 8
-#define SPI_TRANSFER_BUF_LEN   (6 + CAN_FRAME_MAX_DATA_LEN)
-#define CAN_FRAME_MAX_BITS     128
-
-#define TX_ECHO_SKB_MAX        1
-
-#define DEVICE_NAME "mcp251x"
-
-static int mcp251x_enable_dma; /* Enable SPI DMA. Default: 0 (Off) */
-module_param(mcp251x_enable_dma, int, S_IRUGO);
-MODULE_PARM_DESC(mcp251x_enable_dma, "Enable SPI DMA. Default: 0 (Off)");
-
-static const struct can_bittiming_const mcp251x_bittiming_const = {
-       .name = DEVICE_NAME,
-       .tseg1_min = 3,
-       .tseg1_max = 16,
-       .tseg2_min = 2,
-       .tseg2_max = 8,
-       .sjw_max = 4,
-       .brp_min = 1,
-       .brp_max = 64,
-       .brp_inc = 1,
-};
-
-enum mcp251x_model {
-       CAN_MCP251X_MCP2510     = 0x2510,
-       CAN_MCP251X_MCP2515     = 0x2515,
-};
-
-struct mcp251x_priv {
-       struct can_priv    can;
-       struct net_device *net;
-       struct spi_device *spi;
-       enum mcp251x_model model;
-
-       struct mutex mcp_lock; /* SPI device lock */
-
-       u8 *spi_tx_buf;
-       u8 *spi_rx_buf;
-       dma_addr_t spi_tx_dma;
-       dma_addr_t spi_rx_dma;
-
-       struct sk_buff *tx_skb;
-       int tx_len;
-
-       struct workqueue_struct *wq;
-       struct work_struct tx_work;
-       struct work_struct restart_work;
-
-       int force_quit;
-       int after_suspend;
-#define AFTER_SUSPEND_UP 1
-#define AFTER_SUSPEND_DOWN 2
-#define AFTER_SUSPEND_POWER 4
-#define AFTER_SUSPEND_RESTART 8
-       int restart_tx;
-       struct regulator *power;
-       struct regulator *transceiver;
-       struct clk *clk;
-};
-
-#define MCP251X_IS(_model) \
-static inline int mcp251x_is_##_model(struct spi_device *spi) \
-{ \
-       struct mcp251x_priv *priv = spi_get_drvdata(spi); \
-       return priv->model == CAN_MCP251X_MCP##_model; \
-}
-
-MCP251X_IS(2510);
-MCP251X_IS(2515);
-
-static void mcp251x_clean(struct net_device *net)
-{
-       struct mcp251x_priv *priv = netdev_priv(net);
-
-       if (priv->tx_skb || priv->tx_len)
-               net->stats.tx_errors++;
-       if (priv->tx_skb)
-               dev_kfree_skb(priv->tx_skb);
-       if (priv->tx_len)
-               can_free_echo_skb(priv->net, 0);
-       priv->tx_skb = NULL;
-       priv->tx_len = 0;
-}
-
-/*
- * Note about handling of error return of mcp251x_spi_trans: accessing
- * registers via SPI is not really different conceptually than using
- * normal I/O assembler instructions, although it's much more
- * complicated from a practical POV. So it's not advisable to always
- * check the return value of this function. Imagine that every
- * read{b,l}, write{b,l} and friends would be bracketed in "if ( < 0)
- * error();", it would be a great mess (well there are some situation
- * when exception handling C++ like could be useful after all). So we
- * just check that transfers are OK at the beginning of our
- * conversation with the chip and to avoid doing really nasty things
- * (like injecting bogus packets in the network stack).
- */
-static int mcp251x_spi_trans(struct spi_device *spi, int len)
-{
-       struct mcp251x_priv *priv = spi_get_drvdata(spi);
-       struct spi_transfer t = {
-               .tx_buf = priv->spi_tx_buf,
-               .rx_buf = priv->spi_rx_buf,
-               .len = len,
-               .cs_change = 0,
-       };
-       struct spi_message m;
-       int ret;
-
-       spi_message_init(&m);
-
-       if (mcp251x_enable_dma) {
-               t.tx_dma = priv->spi_tx_dma;
-               t.rx_dma = priv->spi_rx_dma;
-               m.is_dma_mapped = 1;
-       }
-
-       spi_message_add_tail(&t, &m);
-
-       ret = spi_sync(spi, &m);
-       if (ret)
-               dev_err(&spi->dev, "spi transfer failed: ret = %d\n", ret);
-       return ret;
-}
-
-static u8 mcp251x_read_reg(struct spi_device *spi, uint8_t reg)
-{
-       struct mcp251x_priv *priv = spi_get_drvdata(spi);
-       u8 val = 0;
-
-       priv->spi_tx_buf[0] = INSTRUCTION_READ;
-       priv->spi_tx_buf[1] = reg;
-
-       mcp251x_spi_trans(spi, 3);
-       val = priv->spi_rx_buf[2];
-
-       return val;
-}
-
-static void mcp251x_read_2regs(struct spi_device *spi, uint8_t reg,
-               uint8_t *v1, uint8_t *v2)
-{
-       struct mcp251x_priv *priv = spi_get_drvdata(spi);
-
-       priv->spi_tx_buf[0] = INSTRUCTION_READ;
-       priv->spi_tx_buf[1] = reg;
-
-       mcp251x_spi_trans(spi, 4);
-
-       *v1 = priv->spi_rx_buf[2];
-       *v2 = priv->spi_rx_buf[3];
-}
-
-static void mcp251x_write_reg(struct spi_device *spi, u8 reg, uint8_t val)
-{
-       struct mcp251x_priv *priv = spi_get_drvdata(spi);
-
-       priv->spi_tx_buf[0] = INSTRUCTION_WRITE;
-       priv->spi_tx_buf[1] = reg;
-       priv->spi_tx_buf[2] = val;
-
-       mcp251x_spi_trans(spi, 3);
-}
-
-static void mcp251x_write_bits(struct spi_device *spi, u8 reg,
-                              u8 mask, uint8_t val)
-{
-       struct mcp251x_priv *priv = spi_get_drvdata(spi);
-
-       priv->spi_tx_buf[0] = INSTRUCTION_BIT_MODIFY;
-       priv->spi_tx_buf[1] = reg;
-       priv->spi_tx_buf[2] = mask;
-       priv->spi_tx_buf[3] = val;
-
-       mcp251x_spi_trans(spi, 4);
-}
-
-static void mcp251x_hw_tx_frame(struct spi_device *spi, u8 *buf,
-                               int len, int tx_buf_idx)
-{
-       struct mcp251x_priv *priv = spi_get_drvdata(spi);
-
-       if (mcp251x_is_2510(spi)) {
-               int i;
-
-               for (i = 1; i < TXBDAT_OFF + len; i++)
-                       mcp251x_write_reg(spi, TXBCTRL(tx_buf_idx) + i,
-                                         buf[i]);
-       } else {
-               memcpy(priv->spi_tx_buf, buf, TXBDAT_OFF + len);
-               mcp251x_spi_trans(spi, TXBDAT_OFF + len);
-       }
-}
-
-static void mcp251x_hw_tx(struct spi_device *spi, struct can_frame *frame,
-                         int tx_buf_idx)
-{
-       struct mcp251x_priv *priv = spi_get_drvdata(spi);
-       u32 sid, eid, exide, rtr;
-       u8 buf[SPI_TRANSFER_BUF_LEN];
-
-       exide = (frame->can_id & CAN_EFF_FLAG) ? 1 : 0; /* Extended ID Enable */
-       if (exide)
-               sid = (frame->can_id & CAN_EFF_MASK) >> 18;
-       else
-               sid = frame->can_id & CAN_SFF_MASK; /* Standard ID */
-       eid = frame->can_id & CAN_EFF_MASK; /* Extended ID */
-       rtr = (frame->can_id & CAN_RTR_FLAG) ? 1 : 0; /* Remote transmission */
-
-       buf[TXBCTRL_OFF] = INSTRUCTION_LOAD_TXB(tx_buf_idx);
-       buf[TXBSIDH_OFF] = sid >> SIDH_SHIFT;
-       buf[TXBSIDL_OFF] = ((sid & SIDL_SID_MASK) << SIDL_SID_SHIFT) |
-               (exide << SIDL_EXIDE_SHIFT) |
-               ((eid >> SIDL_EID_SHIFT) & SIDL_EID_MASK);
-       buf[TXBEID8_OFF] = GET_BYTE(eid, 1);
-       buf[TXBEID0_OFF] = GET_BYTE(eid, 0);
-       buf[TXBDLC_OFF] = (rtr << DLC_RTR_SHIFT) | frame->can_dlc;
-       memcpy(buf + TXBDAT_OFF, frame->data, frame->can_dlc);
-       mcp251x_hw_tx_frame(spi, buf, frame->can_dlc, tx_buf_idx);
-
-       /* use INSTRUCTION_RTS, to avoid "repeated frame problem" */
-       priv->spi_tx_buf[0] = INSTRUCTION_RTS(1 << tx_buf_idx);
-       mcp251x_spi_trans(priv->spi, 1);
-}
-
-static void mcp251x_hw_rx_frame(struct spi_device *spi, u8 *buf,
-                               int buf_idx)
-{
-       struct mcp251x_priv *priv = spi_get_drvdata(spi);
-
-       if (mcp251x_is_2510(spi)) {
-               int i, len;
-
-               for (i = 1; i < RXBDAT_OFF; i++)
-                       buf[i] = mcp251x_read_reg(spi, RXBCTRL(buf_idx) + i);
-
-               len = get_can_dlc(buf[RXBDLC_OFF] & RXBDLC_LEN_MASK);
-               for (; i < (RXBDAT_OFF + len); i++)
-                       buf[i] = mcp251x_read_reg(spi, RXBCTRL(buf_idx) + i);
-       } else {
-               priv->spi_tx_buf[RXBCTRL_OFF] = INSTRUCTION_READ_RXB(buf_idx);
-               mcp251x_spi_trans(spi, SPI_TRANSFER_BUF_LEN);
-               memcpy(buf, priv->spi_rx_buf, SPI_TRANSFER_BUF_LEN);
-       }
-}
-
-static void mcp251x_hw_rx(struct spi_device *spi, int buf_idx)
-{
-       struct mcp251x_priv *priv = spi_get_drvdata(spi);
-       struct sk_buff *skb;
-       struct can_frame *frame;
-       u8 buf[SPI_TRANSFER_BUF_LEN];
-
-       skb = alloc_can_skb(priv->net, &frame);
-       if (!skb) {
-               dev_err(&spi->dev, "cannot allocate RX skb\n");
-               priv->net->stats.rx_dropped++;
-               return;
-       }
-
-       mcp251x_hw_rx_frame(spi, buf, buf_idx);
-       if (buf[RXBSIDL_OFF] & RXBSIDL_IDE) {
-               /* Extended ID format */
-               frame->can_id = CAN_EFF_FLAG;
-               frame->can_id |=
-                       /* Extended ID part */
-                       SET_BYTE(buf[RXBSIDL_OFF] & RXBSIDL_EID, 2) |
-                       SET_BYTE(buf[RXBEID8_OFF], 1) |
-                       SET_BYTE(buf[RXBEID0_OFF], 0) |
-                       /* Standard ID part */
-                       (((buf[RXBSIDH_OFF] << RXBSIDH_SHIFT) |
-                         (buf[RXBSIDL_OFF] >> RXBSIDL_SHIFT)) << 18);
-               /* Remote transmission request */
-               if (buf[RXBDLC_OFF] & RXBDLC_RTR)
-                       frame->can_id |= CAN_RTR_FLAG;
-       } else {
-               /* Standard ID format */
-               frame->can_id =
-                       (buf[RXBSIDH_OFF] << RXBSIDH_SHIFT) |
-                       (buf[RXBSIDL_OFF] >> RXBSIDL_SHIFT);
-               if (buf[RXBSIDL_OFF] & RXBSIDL_SRR)
-                       frame->can_id |= CAN_RTR_FLAG;
-       }
-       /* Data length */
-       frame->can_dlc = get_can_dlc(buf[RXBDLC_OFF] & RXBDLC_LEN_MASK);
-       memcpy(frame->data, buf + RXBDAT_OFF, frame->can_dlc);
-
-       priv->net->stats.rx_packets++;
-       priv->net->stats.rx_bytes += frame->can_dlc;
-
-       can_led_event(priv->net, CAN_LED_EVENT_RX);
-
-       netif_rx_ni(skb);
-}
-
-static void mcp251x_hw_sleep(struct spi_device *spi)
-{
-       mcp251x_write_reg(spi, CANCTRL, CANCTRL_REQOP_SLEEP);
-}
-
-static netdev_tx_t mcp251x_hard_start_xmit(struct sk_buff *skb,
-                                          struct net_device *net)
-{
-       struct mcp251x_priv *priv = netdev_priv(net);
-       struct spi_device *spi = priv->spi;
-
-       if (priv->tx_skb || priv->tx_len) {
-               dev_warn(&spi->dev, "hard_xmit called while tx busy\n");
-               return NETDEV_TX_BUSY;
-       }
-
-       if (can_dropped_invalid_skb(net, skb))
-               return NETDEV_TX_OK;
-
-       netif_stop_queue(net);
-       priv->tx_skb = skb;
-       queue_work(priv->wq, &priv->tx_work);
-
-       return NETDEV_TX_OK;
-}
-
-static int mcp251x_do_set_mode(struct net_device *net, enum can_mode mode)
-{
-       struct mcp251x_priv *priv = netdev_priv(net);
-
-       switch (mode) {
-       case CAN_MODE_START:
-               mcp251x_clean(net);
-               /* We have to delay work since SPI I/O may sleep */
-               priv->can.state = CAN_STATE_ERROR_ACTIVE;
-               priv->restart_tx = 1;
-               if (priv->can.restart_ms == 0)
-                       priv->after_suspend = AFTER_SUSPEND_RESTART;
-               queue_work(priv->wq, &priv->restart_work);
-               break;
-       default:
-               return -EOPNOTSUPP;
-       }
-
-       return 0;
-}
-
-static int mcp251x_set_normal_mode(struct spi_device *spi)
-{
-       struct mcp251x_priv *priv = spi_get_drvdata(spi);
-       unsigned long timeout;
-
-       /* Enable interrupts */
-       mcp251x_write_reg(spi, CANINTE,
-                         CANINTE_ERRIE | CANINTE_TX2IE | CANINTE_TX1IE |
-                         CANINTE_TX0IE | CANINTE_RX1IE | CANINTE_RX0IE);
-
-       if (priv->can.ctrlmode & CAN_CTRLMODE_LOOPBACK) {
-               /* Put device into loopback mode */
-               mcp251x_write_reg(spi, CANCTRL, CANCTRL_REQOP_LOOPBACK);
-       } else if (priv->can.ctrlmode & CAN_CTRLMODE_LISTENONLY) {
-               /* Put device into listen-only mode */
-               mcp251x_write_reg(spi, CANCTRL, CANCTRL_REQOP_LISTEN_ONLY);
-       } else {
-               /* Put device into normal mode */
-               mcp251x_write_reg(spi, CANCTRL, CANCTRL_REQOP_NORMAL);
-
-               /* Wait for the device to enter normal mode */
-               timeout = jiffies + HZ;
-               while (mcp251x_read_reg(spi, CANSTAT) & CANCTRL_REQOP_MASK) {
-                       schedule();
-                       if (time_after(jiffies, timeout)) {
-                               dev_err(&spi->dev, "MCP251x didn't"
-                                       " enter in normal mode\n");
-                               return -EBUSY;
-                       }
-               }
-       }
-       priv->can.state = CAN_STATE_ERROR_ACTIVE;
-       return 0;
-}
-
-static int mcp251x_do_set_bittiming(struct net_device *net)
-{
-       struct mcp251x_priv *priv = netdev_priv(net);
-       struct can_bittiming *bt = &priv->can.bittiming;
-       struct spi_device *spi = priv->spi;
-
-       mcp251x_write_reg(spi, CNF1, ((bt->sjw - 1) << CNF1_SJW_SHIFT) |
-                         (bt->brp - 1));
-       mcp251x_write_reg(spi, CNF2, CNF2_BTLMODE |
-                         (priv->can.ctrlmode & CAN_CTRLMODE_3_SAMPLES ?
-                          CNF2_SAM : 0) |
-                         ((bt->phase_seg1 - 1) << CNF2_PS1_SHIFT) |
-                         (bt->prop_seg - 1));
-       mcp251x_write_bits(spi, CNF3, CNF3_PHSEG2_MASK,
-                          (bt->phase_seg2 - 1));
-       dev_dbg(&spi->dev, "CNF: 0x%02x 0x%02x 0x%02x\n",
-               mcp251x_read_reg(spi, CNF1),
-               mcp251x_read_reg(spi, CNF2),
-               mcp251x_read_reg(spi, CNF3));
-
-       return 0;
-}
-
-static int mcp251x_setup(struct net_device *net, struct mcp251x_priv *priv,
-                        struct spi_device *spi)
-{
-       mcp251x_do_set_bittiming(net);
-
-       mcp251x_write_reg(spi, RXBCTRL(0),
-                         RXBCTRL_BUKT | RXBCTRL_RXM0 | RXBCTRL_RXM1);
-       mcp251x_write_reg(spi, RXBCTRL(1),
-                         RXBCTRL_RXM0 | RXBCTRL_RXM1);
-       return 0;
-}
-
-static int mcp251x_hw_reset(struct spi_device *spi)
-{
-       struct mcp251x_priv *priv = spi_get_drvdata(spi);
-       int ret;
-       unsigned long timeout;
-
-       priv->spi_tx_buf[0] = INSTRUCTION_RESET;
-       ret = spi_write(spi, priv->spi_tx_buf, 1);
-       if (ret) {
-               dev_err(&spi->dev, "reset failed: ret = %d\n", ret);
-               return -EIO;
-       }
-
-       /* Wait for reset to finish */
-       timeout = jiffies + HZ;
-       mdelay(10);
-       while ((mcp251x_read_reg(spi, CANSTAT) & CANCTRL_REQOP_MASK)
-              != CANCTRL_REQOP_CONF) {
-               schedule();
-               if (time_after(jiffies, timeout)) {
-                       dev_err(&spi->dev, "MCP251x didn't"
-                               " enter in conf mode after reset\n");
-                       return -EBUSY;
-               }
-       }
-       return 0;
-}
-
-static int mcp251x_hw_probe(struct spi_device *spi)
-{
-       int st1, st2;
-
-       mcp251x_hw_reset(spi);
-
-       /*
-        * Please note that these are "magic values" based on after
-        * reset defaults taken from data sheet which allows us to see
-        * if we really have a chip on the bus (we avoid common all
-        * zeroes or all ones situations)
-        */
-       st1 = mcp251x_read_reg(spi, CANSTAT) & 0xEE;
-       st2 = mcp251x_read_reg(spi, CANCTRL) & 0x17;
-
-       dev_dbg(&spi->dev, "CANSTAT 0x%02x CANCTRL 0x%02x\n", st1, st2);
-
-       /* Check for power up default values */
-       return (st1 == 0x80 && st2 == 0x07) ? 1 : 0;
-}
-
-static int mcp251x_power_enable(struct regulator *reg, int enable)
-{
-       if (IS_ERR_OR_NULL(reg))
-               return 0;
-
-       if (enable)
-               return regulator_enable(reg);
-       else
-               return regulator_disable(reg);
-}
-
-static void mcp251x_open_clean(struct net_device *net)
-{
-       struct mcp251x_priv *priv = netdev_priv(net);
-       struct spi_device *spi = priv->spi;
-
-       free_irq(spi->irq, priv);
-       mcp251x_hw_sleep(spi);
-       mcp251x_power_enable(priv->transceiver, 0);
-       close_candev(net);
-}
-
-static int mcp251x_stop(struct net_device *net)
-{
-       struct mcp251x_priv *priv = netdev_priv(net);
-       struct spi_device *spi = priv->spi;
-
-       close_candev(net);
-
-       priv->force_quit = 1;
-       free_irq(spi->irq, priv);
-       destroy_workqueue(priv->wq);
-       priv->wq = NULL;
-
-       mutex_lock(&priv->mcp_lock);
-
-       /* Disable and clear pending interrupts */
-       mcp251x_write_reg(spi, CANINTE, 0x00);
-       mcp251x_write_reg(spi, CANINTF, 0x00);
-
-       mcp251x_write_reg(spi, TXBCTRL(0), 0);
-       mcp251x_clean(net);
-
-       mcp251x_hw_sleep(spi);
-
-       mcp251x_power_enable(priv->transceiver, 0);
-
-       priv->can.state = CAN_STATE_STOPPED;
-
-       mutex_unlock(&priv->mcp_lock);
-
-       can_led_event(net, CAN_LED_EVENT_STOP);
-
-       return 0;
-}
-
-static void mcp251x_error_skb(struct net_device *net, int can_id, int data1)
-{
-       struct sk_buff *skb;
-       struct can_frame *frame;
-
-       skb = alloc_can_err_skb(net, &frame);
-       if (skb) {
-               frame->can_id |= can_id;
-               frame->data[1] = data1;
-               netif_rx_ni(skb);
-       } else {
-               netdev_err(net, "cannot allocate error skb\n");
-       }
-}
-
-static void mcp251x_tx_work_handler(struct work_struct *ws)
-{
-       struct mcp251x_priv *priv = container_of(ws, struct mcp251x_priv,
-                                                tx_work);
-       struct spi_device *spi = priv->spi;
-       struct net_device *net = priv->net;
-       struct can_frame *frame;
-
-       mutex_lock(&priv->mcp_lock);
-       if (priv->tx_skb) {
-               if (priv->can.state == CAN_STATE_BUS_OFF) {
-                       mcp251x_clean(net);
-               } else {
-                       frame = (struct can_frame *)priv->tx_skb->data;
-
-                       if (frame->can_dlc > CAN_FRAME_MAX_DATA_LEN)
-                               frame->can_dlc = CAN_FRAME_MAX_DATA_LEN;
-                       mcp251x_hw_tx(spi, frame, 0);
-                       priv->tx_len = 1 + frame->can_dlc;
-                       can_put_echo_skb(priv->tx_skb, net, 0);
-                       priv->tx_skb = NULL;
-               }
-       }
-       mutex_unlock(&priv->mcp_lock);
-}
-
-static void mcp251x_restart_work_handler(struct work_struct *ws)
-{
-       struct mcp251x_priv *priv = container_of(ws, struct mcp251x_priv,
-                                                restart_work);
-       struct spi_device *spi = priv->spi;
-       struct net_device *net = priv->net;
-
-       mutex_lock(&priv->mcp_lock);
-       if (priv->after_suspend) {
-               mdelay(10);
-               mcp251x_hw_reset(spi);
-               mcp251x_setup(net, priv, spi);
-               if (priv->after_suspend & AFTER_SUSPEND_RESTART) {
-                       mcp251x_set_normal_mode(spi);
-               } else if (priv->after_suspend & AFTER_SUSPEND_UP) {
-                       netif_device_attach(net);
-                       mcp251x_clean(net);
-                       mcp251x_set_normal_mode(spi);
-                       netif_wake_queue(net);
-               } else {
-                       mcp251x_hw_sleep(spi);
-               }
-               priv->after_suspend = 0;
-               priv->force_quit = 0;
-       }
-
-       if (priv->restart_tx) {
-               priv->restart_tx = 0;
-               mcp251x_write_reg(spi, TXBCTRL(0), 0);
-               mcp251x_clean(net);
-               netif_wake_queue(net);
-               mcp251x_error_skb(net, CAN_ERR_RESTARTED, 0);
-       }
-       mutex_unlock(&priv->mcp_lock);
-}
-
-static irqreturn_t mcp251x_can_ist(int irq, void *dev_id)
-{
-       struct mcp251x_priv *priv = dev_id;
-       struct spi_device *spi = priv->spi;
-       struct net_device *net = priv->net;
-
-       mutex_lock(&priv->mcp_lock);
-       while (!priv->force_quit) {
-               enum can_state new_state;
-               u8 intf, eflag;
-               u8 clear_intf = 0;
-               int can_id = 0, data1 = 0;
-
-               mcp251x_read_2regs(spi, CANINTF, &intf, &eflag);
-
-               /* mask out flags we don't care about */
-               intf &= CANINTF_RX | CANINTF_TX | CANINTF_ERR;
-
-               /* receive buffer 0 */
-               if (intf & CANINTF_RX0IF) {
-                       mcp251x_hw_rx(spi, 0);
-                       /*
-                        * Free one buffer ASAP
-                        * (The MCP2515 does this automatically.)
-                        */
-                       if (mcp251x_is_2510(spi))
-                               mcp251x_write_bits(spi, CANINTF, CANINTF_RX0IF, 0x00);
-               }
-
-               /* receive buffer 1 */
-               if (intf & CANINTF_RX1IF) {
-                       mcp251x_hw_rx(spi, 1);
-                       /* the MCP2515 does this automatically */
-                       if (mcp251x_is_2510(spi))
-                               clear_intf |= CANINTF_RX1IF;
-               }
-
-               /* any error or tx interrupt we need to clear? */
-               if (intf & (CANINTF_ERR | CANINTF_TX))
-                       clear_intf |= intf & (CANINTF_ERR | CANINTF_TX);
-               if (clear_intf)
-                       mcp251x_write_bits(spi, CANINTF, clear_intf, 0x00);
-
-               if (eflag)
-                       mcp251x_write_bits(spi, EFLG, eflag, 0x00);
-
-               /* Update can state */
-               if (eflag & EFLG_TXBO) {
-                       new_state = CAN_STATE_BUS_OFF;
-                       can_id |= CAN_ERR_BUSOFF;
-               } else if (eflag & EFLG_TXEP) {
-                       new_state = CAN_STATE_ERROR_PASSIVE;
-                       can_id |= CAN_ERR_CRTL;
-                       data1 |= CAN_ERR_CRTL_TX_PASSIVE;
-               } else if (eflag & EFLG_RXEP) {
-                       new_state = CAN_STATE_ERROR_PASSIVE;
-                       can_id |= CAN_ERR_CRTL;
-                       data1 |= CAN_ERR_CRTL_RX_PASSIVE;
-               } else if (eflag & EFLG_TXWAR) {
-                       new_state = CAN_STATE_ERROR_WARNING;
-                       can_id |= CAN_ERR_CRTL;
-                       data1 |= CAN_ERR_CRTL_TX_WARNING;
-               } else if (eflag & EFLG_RXWAR) {
-                       new_state = CAN_STATE_ERROR_WARNING;
-                       can_id |= CAN_ERR_CRTL;
-                       data1 |= CAN_ERR_CRTL_RX_WARNING;
-               } else {
-                       new_state = CAN_STATE_ERROR_ACTIVE;
-               }
-
-               /* Update can state statistics */
-               switch (priv->can.state) {
-               case CAN_STATE_ERROR_ACTIVE:
-                       if (new_state >= CAN_STATE_ERROR_WARNING &&
-                           new_state <= CAN_STATE_BUS_OFF)
-                               priv->can.can_stats.error_warning++;
-               case CAN_STATE_ERROR_WARNING:   /* fallthrough */
-                       if (new_state >= CAN_STATE_ERROR_PASSIVE &&
-                           new_state <= CAN_STATE_BUS_OFF)
-                               priv->can.can_stats.error_passive++;
-                       break;
-               default:
-                       break;
-               }
-               priv->can.state = new_state;
-
-               if (intf & CANINTF_ERRIF) {
-                       /* Handle overflow counters */
-                       if (eflag & (EFLG_RX0OVR | EFLG_RX1OVR)) {
-                               if (eflag & EFLG_RX0OVR) {
-                                       net->stats.rx_over_errors++;
-                                       net->stats.rx_errors++;
-                               }
-                               if (eflag & EFLG_RX1OVR) {
-                                       net->stats.rx_over_errors++;
-                                       net->stats.rx_errors++;
-                               }
-                               can_id |= CAN_ERR_CRTL;
-                               data1 |= CAN_ERR_CRTL_RX_OVERFLOW;
-                       }
-                       mcp251x_error_skb(net, can_id, data1);
-               }
-
-               if (priv->can.state == CAN_STATE_BUS_OFF) {
-                       if (priv->can.restart_ms == 0) {
-                               priv->force_quit = 1;
-                               can_bus_off(net);
-                               mcp251x_hw_sleep(spi);
-                               break;
-                       }
-               }
-
-               if (intf == 0)
-                       break;
-
-               if (intf & CANINTF_TX) {
-                       net->stats.tx_packets++;
-                       net->stats.tx_bytes += priv->tx_len - 1;
-                       can_led_event(net, CAN_LED_EVENT_TX);
-                       if (priv->tx_len) {
-                               can_get_echo_skb(net, 0);
-                               priv->tx_len = 0;
-                       }
-                       netif_wake_queue(net);
-               }
-
-       }
-       mutex_unlock(&priv->mcp_lock);
-       return IRQ_HANDLED;
-}
-
-static int mcp251x_open(struct net_device *net)
-{
-       struct mcp251x_priv *priv = netdev_priv(net);
-       struct spi_device *spi = priv->spi;
-       unsigned long flags = IRQF_ONESHOT | IRQF_TRIGGER_FALLING;
-       int ret;
-
-       ret = open_candev(net);
-       if (ret) {
-               dev_err(&spi->dev, "unable to set initial baudrate!\n");
-               return ret;
-       }
-
-       mutex_lock(&priv->mcp_lock);
-       mcp251x_power_enable(priv->transceiver, 1);
-
-       priv->force_quit = 0;
-       priv->tx_skb = NULL;
-       priv->tx_len = 0;
-
-       ret = request_threaded_irq(spi->irq, NULL, mcp251x_can_ist,
-                                  flags, DEVICE_NAME, priv);
-       if (ret) {
-               dev_err(&spi->dev, "failed to acquire irq %d\n", spi->irq);
-               mcp251x_power_enable(priv->transceiver, 0);
-               close_candev(net);
-               goto open_unlock;
-       }
-
-       priv->wq = create_freezable_workqueue("mcp251x_wq");
-       INIT_WORK(&priv->tx_work, mcp251x_tx_work_handler);
-       INIT_WORK(&priv->restart_work, mcp251x_restart_work_handler);
-
-       ret = mcp251x_hw_reset(spi);
-       if (ret) {
-               mcp251x_open_clean(net);
-               goto open_unlock;
-       }
-       ret = mcp251x_setup(net, priv, spi);
-       if (ret) {
-               mcp251x_open_clean(net);
-               goto open_unlock;
-       }
-       ret = mcp251x_set_normal_mode(spi);
-       if (ret) {
-               mcp251x_open_clean(net);
-               goto open_unlock;
-       }
-
-       can_led_event(net, CAN_LED_EVENT_OPEN);
-
-       netif_wake_queue(net);
-
-open_unlock:
-       mutex_unlock(&priv->mcp_lock);
-       return ret;
-}
-
-static const struct net_device_ops mcp251x_netdev_ops = {
-       .ndo_open = mcp251x_open,
-       .ndo_stop = mcp251x_stop,
-       .ndo_start_xmit = mcp251x_hard_start_xmit,
-       .ndo_change_mtu = can_change_mtu,
-};
-
-static const struct of_device_id mcp251x_of_match[] = {
-       {
-               .compatible     = "microchip,mcp2510",
-               .data           = (void *)CAN_MCP251X_MCP2510,
-       },
-       {
-               .compatible     = "microchip,mcp2515",
-               .data           = (void *)CAN_MCP251X_MCP2515,
-       },
-       { }
-};
-MODULE_DEVICE_TABLE(of, mcp251x_of_match);
-
-static const struct spi_device_id mcp251x_id_table[] = {
-       {
-               .name           = "mcp2510",
-               .driver_data    = (kernel_ulong_t)CAN_MCP251X_MCP2510,
-       },
-       {
-               .name           = "mcp2515",
-               .driver_data    = (kernel_ulong_t)CAN_MCP251X_MCP2515,
-       },
-       { }
-};
-MODULE_DEVICE_TABLE(spi, mcp251x_id_table);
-
-static int mcp251x_can_probe(struct spi_device *spi)
-{
-       const struct of_device_id *of_id = of_match_device(mcp251x_of_match,
-                                                          &spi->dev);
-       struct mcp251x_platform_data *pdata = dev_get_platdata(&spi->dev);
-       struct net_device *net;
-       struct mcp251x_priv *priv;
-       int freq, ret = -ENODEV;
-       struct clk *clk;
-
-       clk = devm_clk_get(&spi->dev, NULL);
-       if (IS_ERR(clk)) {
-               if (pdata)
-                       freq = pdata->oscillator_frequency;
-               else
-                       return PTR_ERR(clk);
-       } else {
-               freq = clk_get_rate(clk);
-       }
-
-       /* Sanity check */
-       if (freq < 1000000 || freq > 25000000)
-               return -ERANGE;
-
-       /* Allocate can/net device */
-       net = alloc_candev(sizeof(struct mcp251x_priv), TX_ECHO_SKB_MAX);
-       if (!net)
-               return -ENOMEM;
-
-       if (!IS_ERR(clk)) {
-               ret = clk_prepare_enable(clk);
-               if (ret)
-                       goto out_free;
-       }
-
-       net->netdev_ops = &mcp251x_netdev_ops;
-       net->flags |= IFF_ECHO;
-
-       priv = netdev_priv(net);
-       priv->can.bittiming_const = &mcp251x_bittiming_const;
-       priv->can.do_set_mode = mcp251x_do_set_mode;
-       priv->can.clock.freq = freq / 2;
-       priv->can.ctrlmode_supported = CAN_CTRLMODE_3_SAMPLES |
-               CAN_CTRLMODE_LOOPBACK | CAN_CTRLMODE_LISTENONLY;
-       if (of_id)
-               priv->model = (enum mcp251x_model)of_id->data;
-       else
-               priv->model = spi_get_device_id(spi)->driver_data;
-       priv->net = net;
-       priv->clk = clk;
-
-       priv->power = devm_regulator_get(&spi->dev, "vdd");
-       priv->transceiver = devm_regulator_get(&spi->dev, "xceiver");
-       if ((PTR_ERR(priv->power) == -EPROBE_DEFER) ||
-           (PTR_ERR(priv->transceiver) == -EPROBE_DEFER)) {
-               ret = -EPROBE_DEFER;
-               goto out_clk;
-       }
-
-       ret = mcp251x_power_enable(priv->power, 1);
-       if (ret)
-               goto out_clk;
-
-       spi_set_drvdata(spi, priv);
-
-       priv->spi = spi;
-       mutex_init(&priv->mcp_lock);
-
-       /* If requested, allocate DMA buffers */
-       if (mcp251x_enable_dma) {
-               spi->dev.coherent_dma_mask = ~0;
-
-               /*
-                * Minimum coherent DMA allocation is PAGE_SIZE, so allocate
-                * that much and share it between Tx and Rx DMA buffers.
-                */
-               priv->spi_tx_buf = dma_alloc_coherent(&spi->dev,
-                                                     PAGE_SIZE,
-                                                     &priv->spi_tx_dma,
-                                                     GFP_DMA);
-
-               if (priv->spi_tx_buf) {
-                       priv->spi_rx_buf = (priv->spi_tx_buf + (PAGE_SIZE / 2));
-                       priv->spi_rx_dma = (dma_addr_t)(priv->spi_tx_dma +
-                                                       (PAGE_SIZE / 2));
-               } else {
-                       /* Fall back to non-DMA */
-                       mcp251x_enable_dma = 0;
-               }
-       }
-
-       /* Allocate non-DMA buffers */
-       if (!mcp251x_enable_dma) {
-               priv->spi_tx_buf = devm_kzalloc(&spi->dev, SPI_TRANSFER_BUF_LEN,
-                                               GFP_KERNEL);
-               if (!priv->spi_tx_buf) {
-                       ret = -ENOMEM;
-                       goto error_probe;
-               }
-               priv->spi_rx_buf = devm_kzalloc(&spi->dev, SPI_TRANSFER_BUF_LEN,
-                                               GFP_KERNEL);
-               if (!priv->spi_rx_buf) {
-                       ret = -ENOMEM;
-                       goto error_probe;
-               }
-       }
-
-       SET_NETDEV_DEV(net, &spi->dev);
-
-       /* Configure the SPI bus */
-       spi->mode = spi->mode ? : SPI_MODE_0;
-       if (mcp251x_is_2510(spi))
-               spi->max_speed_hz = spi->max_speed_hz ? : 5 * 1000 * 1000;
-       else
-               spi->max_speed_hz = spi->max_speed_hz ? : 10 * 1000 * 1000;
-       spi->bits_per_word = 8;
-       spi_setup(spi);
-
-       /* Here is OK to not lock the MCP, no one knows about it yet */
-       if (!mcp251x_hw_probe(spi)) {
-               ret = -ENODEV;
-               goto error_probe;
-       }
-       mcp251x_hw_sleep(spi);
-
-       ret = register_candev(net);
-       if (ret)
-               goto error_probe;
-
-       devm_can_led_init(net);
-
-       return ret;
-
-error_probe:
-       if (mcp251x_enable_dma)
-               dma_free_coherent(&spi->dev, PAGE_SIZE,
-                                 priv->spi_tx_buf, priv->spi_tx_dma);
-       mcp251x_power_enable(priv->power, 0);
-
-out_clk:
-       if (!IS_ERR(clk))
-               clk_disable_unprepare(clk);
-
-out_free:
-       free_candev(net);
-
-       return ret;
-}
-
-static int mcp251x_can_remove(struct spi_device *spi)
-{
-       struct mcp251x_priv *priv = spi_get_drvdata(spi);
-       struct net_device *net = priv->net;
-
-       unregister_candev(net);
-
-       if (mcp251x_enable_dma) {
-               dma_free_coherent(&spi->dev, PAGE_SIZE,
-                                 priv->spi_tx_buf, priv->spi_tx_dma);
-       }
-
-       mcp251x_power_enable(priv->power, 0);
-
-       if (!IS_ERR(priv->clk))
-               clk_disable_unprepare(priv->clk);
-
-       free_candev(net);
-
-       return 0;
-}
-
-static int __maybe_unused mcp251x_can_suspend(struct device *dev)
-{
-       struct spi_device *spi = to_spi_device(dev);
-       struct mcp251x_priv *priv = spi_get_drvdata(spi);
-       struct net_device *net = priv->net;
-
-       priv->force_quit = 1;
-       disable_irq(spi->irq);
-       /*
-        * Note: at this point neither IST nor workqueues are running.
-        * open/stop cannot be called anyway so locking is not needed
-        */
-       if (netif_running(net)) {
-               netif_device_detach(net);
-
-               mcp251x_hw_sleep(spi);
-               mcp251x_power_enable(priv->transceiver, 0);
-               priv->after_suspend = AFTER_SUSPEND_UP;
-       } else {
-               priv->after_suspend = AFTER_SUSPEND_DOWN;
-       }
-
-       if (!IS_ERR_OR_NULL(priv->power)) {
-               regulator_disable(priv->power);
-               priv->after_suspend |= AFTER_SUSPEND_POWER;
-       }
-
-       return 0;
-}
-
-static int __maybe_unused mcp251x_can_resume(struct device *dev)
-{
-       struct spi_device *spi = to_spi_device(dev);
-       struct mcp251x_priv *priv = spi_get_drvdata(spi);
-
-       if (priv->after_suspend & AFTER_SUSPEND_POWER) {
-               mcp251x_power_enable(priv->power, 1);
-               queue_work(priv->wq, &priv->restart_work);
-       } else {
-               if (priv->after_suspend & AFTER_SUSPEND_UP) {
-                       mcp251x_power_enable(priv->transceiver, 1);
-                       queue_work(priv->wq, &priv->restart_work);
-               } else {
-                       priv->after_suspend = 0;
-               }
-       }
-       priv->force_quit = 0;
-       enable_irq(spi->irq);
-       return 0;
-}
-
-static SIMPLE_DEV_PM_OPS(mcp251x_can_pm_ops, mcp251x_can_suspend,
-       mcp251x_can_resume);
-
-static struct spi_driver mcp251x_can_driver = {
-       .driver = {
-               .name = DEVICE_NAME,
-               .owner = THIS_MODULE,
-               .of_match_table = mcp251x_of_match,
-               .pm = &mcp251x_can_pm_ops,
-       },
-       .id_table = mcp251x_id_table,
-       .probe = mcp251x_can_probe,
-       .remove = mcp251x_can_remove,
-};
-module_spi_driver(mcp251x_can_driver);
-
-MODULE_AUTHOR("Chris Elston <celston@katalix.com>, "
-             "Christian Pellegrin <chripell@evolware.org>");
-MODULE_DESCRIPTION("Microchip 251x CAN driver");
-MODULE_LICENSE("GPL v2");
index f19be5269e7be55ae92ccdb12f2274768aeb5ddf..81c711719490511718122ebc5ebf77bc6c883e6c 100644 (file)
@@ -1,5 +1,5 @@
 config CAN_MSCAN
-       depends on PPC || M68K
+       depends on PPC
        tristate "Support for Freescale MSCAN based chips"
        ---help---
          The Motorola Scalable Controller Area Network (MSCAN) definition
diff --git a/drivers/net/can/rcar_can.c b/drivers/net/can/rcar_can.c
new file mode 100644 (file)
index 0000000..5268d21
--- /dev/null
@@ -0,0 +1,876 @@
+/* Renesas R-Car CAN device driver
+ *
+ * Copyright (C) 2013 Cogent Embedded, Inc. <source@cogentembedded.com>
+ * Copyright (C) 2013 Renesas Solutions Corp.
+ *
+ * This program is free software; you can redistribute  it and/or modify it
+ * under  the terms of  the GNU General  Public License as published by the
+ * Free Software Foundation;  either version 2 of the  License, or (at your
+ * option) any later version.
+ */
+
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/types.h>
+#include <linux/interrupt.h>
+#include <linux/errno.h>
+#include <linux/netdevice.h>
+#include <linux/platform_device.h>
+#include <linux/can/led.h>
+#include <linux/can/dev.h>
+#include <linux/clk.h>
+#include <linux/can/platform/rcar_can.h>
+
+#define RCAR_CAN_DRV_NAME      "rcar_can"
+
+/* Mailbox configuration:
+ * mailbox 60 - 63 - Rx FIFO mailboxes
+ * mailbox 56 - 59 - Tx FIFO mailboxes
+ * non-FIFO mailboxes are not used
+ */
+#define RCAR_CAN_N_MBX         64 /* Number of mailboxes in non-FIFO mode */
+#define RCAR_CAN_RX_FIFO_MBX   60 /* Mailbox - window to Rx FIFO */
+#define RCAR_CAN_TX_FIFO_MBX   56 /* Mailbox - window to Tx FIFO */
+#define RCAR_CAN_FIFO_DEPTH    4
+
+/* Mailbox registers structure */
+struct rcar_can_mbox_regs {
+       u32 id;         /* IDE and RTR bits, SID and EID */
+       u8 stub;        /* Not used */
+       u8 dlc;         /* Data Length Code - bits [0..3] */
+       u8 data[8];     /* Data Bytes */
+       u8 tsh;         /* Time Stamp Higher Byte */
+       u8 tsl;         /* Time Stamp Lower Byte */
+};
+
+struct rcar_can_regs {
+       struct rcar_can_mbox_regs mb[RCAR_CAN_N_MBX]; /* Mailbox registers */
+       u32 mkr_2_9[8]; /* Mask Registers 2-9 */
+       u32 fidcr[2];   /* FIFO Received ID Compare Register */
+       u32 mkivlr1;    /* Mask Invalid Register 1 */
+       u32 mier1;      /* Mailbox Interrupt Enable Register 1 */
+       u32 mkr_0_1[2]; /* Mask Registers 0-1 */
+       u32 mkivlr0;    /* Mask Invalid Register 0*/
+       u32 mier0;      /* Mailbox Interrupt Enable Register 0 */
+       u8 pad_440[0x3c0];
+       u8 mctl[64];    /* Message Control Registers */
+       u16 ctlr;       /* Control Register */
+       u16 str;        /* Status register */
+       u8 bcr[3];      /* Bit Configuration Register */
+       u8 clkr;        /* Clock Select Register */
+       u8 rfcr;        /* Receive FIFO Control Register */
+       u8 rfpcr;       /* Receive FIFO Pointer Control Register */
+       u8 tfcr;        /* Transmit FIFO Control Register */
+       u8 tfpcr;       /* Transmit FIFO Pointer Control Register */
+       u8 eier;        /* Error Interrupt Enable Register */
+       u8 eifr;        /* Error Interrupt Factor Judge Register */
+       u8 recr;        /* Receive Error Count Register */
+       u8 tecr;        /* Transmit Error Count Register */
+       u8 ecsr;        /* Error Code Store Register */
+       u8 cssr;        /* Channel Search Support Register */
+       u8 mssr;        /* Mailbox Search Status Register */
+       u8 msmr;        /* Mailbox Search Mode Register */
+       u16 tsr;        /* Time Stamp Register */
+       u8 afsr;        /* Acceptance Filter Support Register */
+       u8 pad_857;
+       u8 tcr;         /* Test Control Register */
+       u8 pad_859[7];
+       u8 ier;         /* Interrupt Enable Register */
+       u8 isr;         /* Interrupt Status Register */
+       u8 pad_862;
+       u8 mbsmr;       /* Mailbox Search Mask Register */
+};
+
+struct rcar_can_priv {
+       struct can_priv can;    /* Must be the first member! */
+       struct net_device *ndev;
+       struct napi_struct napi;
+       struct rcar_can_regs __iomem *regs;
+       struct clk *clk;
+       u8 tx_dlc[RCAR_CAN_FIFO_DEPTH];
+       u32 tx_head;
+       u32 tx_tail;
+       u8 clock_select;
+       u8 ier;
+};
+
+static const struct can_bittiming_const rcar_can_bittiming_const = {
+       .name = RCAR_CAN_DRV_NAME,
+       .tseg1_min = 4,
+       .tseg1_max = 16,
+       .tseg2_min = 2,
+       .tseg2_max = 8,
+       .sjw_max = 4,
+       .brp_min = 1,
+       .brp_max = 1024,
+       .brp_inc = 1,
+};
+
+/* Control Register bits */
+#define RCAR_CAN_CTLR_BOM      (3 << 11) /* Bus-Off Recovery Mode Bits */
+#define RCAR_CAN_CTLR_BOM_ENT  (1 << 11) /* Entry to halt mode */
+                                       /* at bus-off entry */
+#define RCAR_CAN_CTLR_SLPM     (1 << 10)
+#define RCAR_CAN_CTLR_CANM     (3 << 8) /* Operating Mode Select Bit */
+#define RCAR_CAN_CTLR_CANM_HALT        (1 << 9)
+#define RCAR_CAN_CTLR_CANM_RESET (1 << 8)
+#define RCAR_CAN_CTLR_CANM_FORCE_RESET (3 << 8)
+#define RCAR_CAN_CTLR_MLM      (1 << 3) /* Message Lost Mode Select */
+#define RCAR_CAN_CTLR_IDFM     (3 << 1) /* ID Format Mode Select Bits */
+#define RCAR_CAN_CTLR_IDFM_MIXED (1 << 2) /* Mixed ID mode */
+#define RCAR_CAN_CTLR_MBM      (1 << 0) /* Mailbox Mode select */
+
+/* Status Register bits */
+#define RCAR_CAN_STR_RSTST     (1 << 8) /* Reset Status Bit */
+
+/* FIFO Received ID Compare Registers 0 and 1 bits */
+#define RCAR_CAN_FIDCR_IDE     (1 << 31) /* ID Extension Bit */
+#define RCAR_CAN_FIDCR_RTR     (1 << 30) /* Remote Transmission Request Bit */
+
+/* Receive FIFO Control Register bits */
+#define RCAR_CAN_RFCR_RFEST    (1 << 7) /* Receive FIFO Empty Status Flag */
+#define RCAR_CAN_RFCR_RFE      (1 << 0) /* Receive FIFO Enable */
+
+/* Transmit FIFO Control Register bits */
+#define RCAR_CAN_TFCR_TFUST    (7 << 1) /* Transmit FIFO Unsent Message */
+                                       /* Number Status Bits */
+#define RCAR_CAN_TFCR_TFUST_SHIFT 1    /* Offset of Transmit FIFO Unsent */
+                                       /* Message Number Status Bits */
+#define RCAR_CAN_TFCR_TFE      (1 << 0) /* Transmit FIFO Enable */
+
+#define RCAR_CAN_N_RX_MKREGS1  2       /* Number of mask registers */
+                                       /* for Rx mailboxes 0-31 */
+#define RCAR_CAN_N_RX_MKREGS2  8
+
+/* Bit Configuration Register settings */
+#define RCAR_CAN_BCR_TSEG1(x)  (((x) & 0x0f) << 20)
+#define RCAR_CAN_BCR_BPR(x)    (((x) & 0x3ff) << 8)
+#define RCAR_CAN_BCR_SJW(x)    (((x) & 0x3) << 4)
+#define RCAR_CAN_BCR_TSEG2(x)  ((x) & 0x07)
+
+/* Mailbox and Mask Registers bits */
+#define RCAR_CAN_IDE           (1 << 31)
+#define RCAR_CAN_RTR           (1 << 30)
+#define RCAR_CAN_SID_SHIFT     18
+
+/* Mailbox Interrupt Enable Register 1 bits */
+#define RCAR_CAN_MIER1_RXFIE   (1 << 28) /* Receive  FIFO Interrupt Enable */
+#define RCAR_CAN_MIER1_TXFIE   (1 << 24) /* Transmit FIFO Interrupt Enable */
+
+/* Interrupt Enable Register bits */
+#define RCAR_CAN_IER_ERSIE     (1 << 5) /* Error (ERS) Interrupt Enable Bit */
+#define RCAR_CAN_IER_RXFIE     (1 << 4) /* Reception FIFO Interrupt */
+                                       /* Enable Bit */
+#define RCAR_CAN_IER_TXFIE     (1 << 3) /* Transmission FIFO Interrupt */
+                                       /* Enable Bit */
+/* Interrupt Status Register bits */
+#define RCAR_CAN_ISR_ERSF      (1 << 5) /* Error (ERS) Interrupt Status Bit */
+#define RCAR_CAN_ISR_RXFF      (1 << 4) /* Reception FIFO Interrupt */
+                                       /* Status Bit */
+#define RCAR_CAN_ISR_TXFF      (1 << 3) /* Transmission FIFO Interrupt */
+                                       /* Status Bit */
+
+/* Error Interrupt Enable Register bits */
+#define RCAR_CAN_EIER_BLIE     (1 << 7) /* Bus Lock Interrupt Enable */
+#define RCAR_CAN_EIER_OLIE     (1 << 6) /* Overload Frame Transmit */
+                                       /* Interrupt Enable */
+#define RCAR_CAN_EIER_ORIE     (1 << 5) /* Receive Overrun  Interrupt Enable */
+#define RCAR_CAN_EIER_BORIE    (1 << 4) /* Bus-Off Recovery Interrupt Enable */
+#define RCAR_CAN_EIER_BOEIE    (1 << 3) /* Bus-Off Entry Interrupt Enable */
+#define RCAR_CAN_EIER_EPIE     (1 << 2) /* Error Passive Interrupt Enable */
+#define RCAR_CAN_EIER_EWIE     (1 << 1) /* Error Warning Interrupt Enable */
+#define RCAR_CAN_EIER_BEIE     (1 << 0) /* Bus Error Interrupt Enable */
+
+/* Error Interrupt Factor Judge Register bits */
+#define RCAR_CAN_EIFR_BLIF     (1 << 7) /* Bus Lock Detect Flag */
+#define RCAR_CAN_EIFR_OLIF     (1 << 6) /* Overload Frame Transmission */
+                                        /* Detect Flag */
+#define RCAR_CAN_EIFR_ORIF     (1 << 5) /* Receive Overrun Detect Flag */
+#define RCAR_CAN_EIFR_BORIF    (1 << 4) /* Bus-Off Recovery Detect Flag */
+#define RCAR_CAN_EIFR_BOEIF    (1 << 3) /* Bus-Off Entry Detect Flag */
+#define RCAR_CAN_EIFR_EPIF     (1 << 2) /* Error Passive Detect Flag */
+#define RCAR_CAN_EIFR_EWIF     (1 << 1) /* Error Warning Detect Flag */
+#define RCAR_CAN_EIFR_BEIF     (1 << 0) /* Bus Error Detect Flag */
+
+/* Error Code Store Register bits */
+#define RCAR_CAN_ECSR_EDPM     (1 << 7) /* Error Display Mode Select Bit */
+#define RCAR_CAN_ECSR_ADEF     (1 << 6) /* ACK Delimiter Error Flag */
+#define RCAR_CAN_ECSR_BE0F     (1 << 5) /* Bit Error (dominant) Flag */
+#define RCAR_CAN_ECSR_BE1F     (1 << 4) /* Bit Error (recessive) Flag */
+#define RCAR_CAN_ECSR_CEF      (1 << 3) /* CRC Error Flag */
+#define RCAR_CAN_ECSR_AEF      (1 << 2) /* ACK Error Flag */
+#define RCAR_CAN_ECSR_FEF      (1 << 1) /* Form Error Flag */
+#define RCAR_CAN_ECSR_SEF      (1 << 0) /* Stuff Error Flag */
+
+#define RCAR_CAN_NAPI_WEIGHT   4
+#define MAX_STR_READS          0x100
+
+static void tx_failure_cleanup(struct net_device *ndev)
+{
+       int i;
+
+       for (i = 0; i < RCAR_CAN_FIFO_DEPTH; i++)
+               can_free_echo_skb(ndev, i);
+}
+
+static void rcar_can_error(struct net_device *ndev)
+{
+       struct rcar_can_priv *priv = netdev_priv(ndev);
+       struct net_device_stats *stats = &ndev->stats;
+       struct can_frame *cf;
+       struct sk_buff *skb;
+       u8 eifr, txerr = 0, rxerr = 0;
+
+       /* Propagate the error condition to the CAN stack */
+       skb = alloc_can_err_skb(ndev, &cf);
+
+       eifr = readb(&priv->regs->eifr);
+       if (eifr & (RCAR_CAN_EIFR_EWIF | RCAR_CAN_EIFR_EPIF)) {
+               txerr = readb(&priv->regs->tecr);
+               rxerr = readb(&priv->regs->recr);
+               if (skb) {
+                       cf->can_id |= CAN_ERR_CRTL;
+                       cf->data[6] = txerr;
+                       cf->data[7] = rxerr;
+               }
+       }
+       if (eifr & RCAR_CAN_EIFR_BEIF) {
+               int rx_errors = 0, tx_errors = 0;
+               u8 ecsr;
+
+               netdev_dbg(priv->ndev, "Bus error interrupt:\n");
+               if (skb) {
+                       cf->can_id |= CAN_ERR_BUSERROR | CAN_ERR_PROT;
+                       cf->data[2] = CAN_ERR_PROT_UNSPEC;
+               }
+               ecsr = readb(&priv->regs->ecsr);
+               if (ecsr & RCAR_CAN_ECSR_ADEF) {
+                       netdev_dbg(priv->ndev, "ACK Delimiter Error\n");
+                       tx_errors++;
+                       writeb(~RCAR_CAN_ECSR_ADEF, &priv->regs->ecsr);
+                       if (skb)
+                               cf->data[3] |= CAN_ERR_PROT_LOC_ACK_DEL;
+               }
+               if (ecsr & RCAR_CAN_ECSR_BE0F) {
+                       netdev_dbg(priv->ndev, "Bit Error (dominant)\n");
+                       tx_errors++;
+                       writeb(~RCAR_CAN_ECSR_BE0F, &priv->regs->ecsr);
+                       if (skb)
+                               cf->data[2] |= CAN_ERR_PROT_BIT0;
+               }
+               if (ecsr & RCAR_CAN_ECSR_BE1F) {
+                       netdev_dbg(priv->ndev, "Bit Error (recessive)\n");
+                       tx_errors++;
+                       writeb(~RCAR_CAN_ECSR_BE1F, &priv->regs->ecsr);
+                       if (skb)
+                               cf->data[2] |= CAN_ERR_PROT_BIT1;
+               }
+               if (ecsr & RCAR_CAN_ECSR_CEF) {
+                       netdev_dbg(priv->ndev, "CRC Error\n");
+                       rx_errors++;
+                       writeb(~RCAR_CAN_ECSR_CEF, &priv->regs->ecsr);
+                       if (skb)
+                               cf->data[3] |= CAN_ERR_PROT_LOC_CRC_SEQ;
+               }
+               if (ecsr & RCAR_CAN_ECSR_AEF) {
+                       netdev_dbg(priv->ndev, "ACK Error\n");
+                       tx_errors++;
+                       writeb(~RCAR_CAN_ECSR_AEF, &priv->regs->ecsr);
+                       if (skb) {
+                               cf->can_id |= CAN_ERR_ACK;
+                               cf->data[3] |= CAN_ERR_PROT_LOC_ACK;
+                       }
+               }
+               if (ecsr & RCAR_CAN_ECSR_FEF) {
+                       netdev_dbg(priv->ndev, "Form Error\n");
+                       rx_errors++;
+                       writeb(~RCAR_CAN_ECSR_FEF, &priv->regs->ecsr);
+                       if (skb)
+                               cf->data[2] |= CAN_ERR_PROT_FORM;
+               }
+               if (ecsr & RCAR_CAN_ECSR_SEF) {
+                       netdev_dbg(priv->ndev, "Stuff Error\n");
+                       rx_errors++;
+                       writeb(~RCAR_CAN_ECSR_SEF, &priv->regs->ecsr);
+                       if (skb)
+                               cf->data[2] |= CAN_ERR_PROT_STUFF;
+               }
+
+               priv->can.can_stats.bus_error++;
+               ndev->stats.rx_errors += rx_errors;
+               ndev->stats.tx_errors += tx_errors;
+               writeb(~RCAR_CAN_EIFR_BEIF, &priv->regs->eifr);
+       }
+       if (eifr & RCAR_CAN_EIFR_EWIF) {
+               netdev_dbg(priv->ndev, "Error warning interrupt\n");
+               priv->can.state = CAN_STATE_ERROR_WARNING;
+               priv->can.can_stats.error_warning++;
+               /* Clear interrupt condition */
+               writeb(~RCAR_CAN_EIFR_EWIF, &priv->regs->eifr);
+               if (skb)
+                       cf->data[1] = txerr > rxerr ? CAN_ERR_CRTL_TX_WARNING :
+                                             CAN_ERR_CRTL_RX_WARNING;
+       }
+       if (eifr & RCAR_CAN_EIFR_EPIF) {
+               netdev_dbg(priv->ndev, "Error passive interrupt\n");
+               priv->can.state = CAN_STATE_ERROR_PASSIVE;
+               priv->can.can_stats.error_passive++;
+               /* Clear interrupt condition */
+               writeb(~RCAR_CAN_EIFR_EPIF, &priv->regs->eifr);
+               if (skb)
+                       cf->data[1] = txerr > rxerr ? CAN_ERR_CRTL_TX_PASSIVE :
+                                             CAN_ERR_CRTL_RX_PASSIVE;
+       }
+       if (eifr & RCAR_CAN_EIFR_BOEIF) {
+               netdev_dbg(priv->ndev, "Bus-off entry interrupt\n");
+               tx_failure_cleanup(ndev);
+               priv->ier = RCAR_CAN_IER_ERSIE;
+               writeb(priv->ier, &priv->regs->ier);
+               priv->can.state = CAN_STATE_BUS_OFF;
+               /* Clear interrupt condition */
+               writeb(~RCAR_CAN_EIFR_BOEIF, &priv->regs->eifr);
+               can_bus_off(ndev);
+               if (skb)
+                       cf->can_id |= CAN_ERR_BUSOFF;
+       }
+       if (eifr & RCAR_CAN_EIFR_ORIF) {
+               netdev_dbg(priv->ndev, "Receive overrun error interrupt\n");
+               ndev->stats.rx_over_errors++;
+               ndev->stats.rx_errors++;
+               writeb(~RCAR_CAN_EIFR_ORIF, &priv->regs->eifr);
+               if (skb) {
+                       cf->can_id |= CAN_ERR_CRTL;
+                       cf->data[1] = CAN_ERR_CRTL_RX_OVERFLOW;
+               }
+       }
+       if (eifr & RCAR_CAN_EIFR_OLIF) {
+               netdev_dbg(priv->ndev,
+                          "Overload Frame Transmission error interrupt\n");
+               ndev->stats.rx_over_errors++;
+               ndev->stats.rx_errors++;
+               writeb(~RCAR_CAN_EIFR_OLIF, &priv->regs->eifr);
+               if (skb) {
+                       cf->can_id |= CAN_ERR_PROT;
+                       cf->data[2] |= CAN_ERR_PROT_OVERLOAD;
+               }
+       }
+
+       if (skb) {
+               stats->rx_packets++;
+               stats->rx_bytes += cf->can_dlc;
+               netif_rx(skb);
+       }
+}
+
+static void rcar_can_tx_done(struct net_device *ndev)
+{
+       struct rcar_can_priv *priv = netdev_priv(ndev);
+       struct net_device_stats *stats = &ndev->stats;
+       u8 isr;
+
+       while (1) {
+               u8 unsent = readb(&priv->regs->tfcr);
+
+               unsent = (unsent & RCAR_CAN_TFCR_TFUST) >>
+                         RCAR_CAN_TFCR_TFUST_SHIFT;
+               if (priv->tx_head - priv->tx_tail <= unsent)
+                       break;
+               stats->tx_packets++;
+               stats->tx_bytes += priv->tx_dlc[priv->tx_tail %
+                                               RCAR_CAN_FIFO_DEPTH];
+               priv->tx_dlc[priv->tx_tail % RCAR_CAN_FIFO_DEPTH] = 0;
+               can_get_echo_skb(ndev, priv->tx_tail % RCAR_CAN_FIFO_DEPTH);
+               priv->tx_tail++;
+               netif_wake_queue(ndev);
+       }
+       /* Clear interrupt */
+       isr = readb(&priv->regs->isr);
+       writeb(isr & ~RCAR_CAN_ISR_TXFF, &priv->regs->isr);
+       can_led_event(ndev, CAN_LED_EVENT_TX);
+}
+
+static irqreturn_t rcar_can_interrupt(int irq, void *dev_id)
+{
+       struct net_device *ndev = dev_id;
+       struct rcar_can_priv *priv = netdev_priv(ndev);
+       u8 isr;
+
+       isr = readb(&priv->regs->isr);
+       if (!(isr & priv->ier))
+               return IRQ_NONE;
+
+       if (isr & RCAR_CAN_ISR_ERSF)
+               rcar_can_error(ndev);
+
+       if (isr & RCAR_CAN_ISR_TXFF)
+               rcar_can_tx_done(ndev);
+
+       if (isr & RCAR_CAN_ISR_RXFF) {
+               if (napi_schedule_prep(&priv->napi)) {
+                       /* Disable Rx FIFO interrupts */
+                       priv->ier &= ~RCAR_CAN_IER_RXFIE;
+                       writeb(priv->ier, &priv->regs->ier);
+                       __napi_schedule(&priv->napi);
+               }
+       }
+
+       return IRQ_HANDLED;
+}
+
+static void rcar_can_set_bittiming(struct net_device *dev)
+{
+       struct rcar_can_priv *priv = netdev_priv(dev);
+       struct can_bittiming *bt = &priv->can.bittiming;
+       u32 bcr;
+
+       bcr = RCAR_CAN_BCR_TSEG1(bt->phase_seg1 + bt->prop_seg - 1) |
+             RCAR_CAN_BCR_BPR(bt->brp - 1) | RCAR_CAN_BCR_SJW(bt->sjw - 1) |
+             RCAR_CAN_BCR_TSEG2(bt->phase_seg2 - 1);
+       /* Don't overwrite CLKR with 32-bit BCR access; CLKR has 8-bit access.
+        * All the registers are big-endian but they get byte-swapped on 32-bit
+        * read/write (but not on 8-bit, contrary to the manuals)...
+        */
+       writel((bcr << 8) | priv->clock_select, &priv->regs->bcr);
+}
+
+static void rcar_can_start(struct net_device *ndev)
+{
+       struct rcar_can_priv *priv = netdev_priv(ndev);
+       u16 ctlr;
+       int i;
+
+       /* Set controller to known mode:
+        * - FIFO mailbox mode
+        * - accept all messages
+        * - overrun mode
+        * CAN is in sleep mode after MCU hardware or software reset.
+        */
+       ctlr = readw(&priv->regs->ctlr);
+       ctlr &= ~RCAR_CAN_CTLR_SLPM;
+       writew(ctlr, &priv->regs->ctlr);
+       /* Go to reset mode */
+       ctlr |= RCAR_CAN_CTLR_CANM_FORCE_RESET;
+       writew(ctlr, &priv->regs->ctlr);
+       for (i = 0; i < MAX_STR_READS; i++) {
+               if (readw(&priv->regs->str) & RCAR_CAN_STR_RSTST)
+                       break;
+       }
+       rcar_can_set_bittiming(ndev);
+       ctlr |= RCAR_CAN_CTLR_IDFM_MIXED; /* Select mixed ID mode */
+       ctlr |= RCAR_CAN_CTLR_BOM_ENT;  /* Entry to halt mode automatically */
+                                       /* at bus-off */
+       ctlr |= RCAR_CAN_CTLR_MBM;      /* Select FIFO mailbox mode */
+       ctlr |= RCAR_CAN_CTLR_MLM;      /* Overrun mode */
+       writew(ctlr, &priv->regs->ctlr);
+
+       /* Accept all SID and EID */
+       writel(0, &priv->regs->mkr_2_9[6]);
+       writel(0, &priv->regs->mkr_2_9[7]);
+       /* In FIFO mailbox mode, write "0" to bits 24 to 31 */
+       writel(0, &priv->regs->mkivlr1);
+       /* Accept all frames */
+       writel(0, &priv->regs->fidcr[0]);
+       writel(RCAR_CAN_FIDCR_IDE | RCAR_CAN_FIDCR_RTR, &priv->regs->fidcr[1]);
+       /* Enable and configure FIFO mailbox interrupts */
+       writel(RCAR_CAN_MIER1_RXFIE | RCAR_CAN_MIER1_TXFIE, &priv->regs->mier1);
+
+       priv->ier = RCAR_CAN_IER_ERSIE | RCAR_CAN_IER_RXFIE |
+                   RCAR_CAN_IER_TXFIE;
+       writeb(priv->ier, &priv->regs->ier);
+
+       /* Accumulate error codes */
+       writeb(RCAR_CAN_ECSR_EDPM, &priv->regs->ecsr);
+       /* Enable error interrupts */
+       writeb(RCAR_CAN_EIER_EWIE | RCAR_CAN_EIER_EPIE | RCAR_CAN_EIER_BOEIE |
+              (priv->can.ctrlmode & CAN_CTRLMODE_BERR_REPORTING ?
+              RCAR_CAN_EIER_BEIE : 0) | RCAR_CAN_EIER_ORIE |
+              RCAR_CAN_EIER_OLIE, &priv->regs->eier);
+       priv->can.state = CAN_STATE_ERROR_ACTIVE;
+
+       /* Go to operation mode */
+       writew(ctlr & ~RCAR_CAN_CTLR_CANM, &priv->regs->ctlr);
+       for (i = 0; i < MAX_STR_READS; i++) {
+               if (!(readw(&priv->regs->str) & RCAR_CAN_STR_RSTST))
+                       break;
+       }
+       /* Enable Rx and Tx FIFO */
+       writeb(RCAR_CAN_RFCR_RFE, &priv->regs->rfcr);
+       writeb(RCAR_CAN_TFCR_TFE, &priv->regs->tfcr);
+}
+
+static int rcar_can_open(struct net_device *ndev)
+{
+       struct rcar_can_priv *priv = netdev_priv(ndev);
+       int err;
+
+       err = clk_prepare_enable(priv->clk);
+       if (err) {
+               netdev_err(ndev, "clk_prepare_enable() failed, error %d\n",
+                          err);
+               goto out;
+       }
+       err = open_candev(ndev);
+       if (err) {
+               netdev_err(ndev, "open_candev() failed, error %d\n", err);
+               goto out_clock;
+       }
+       napi_enable(&priv->napi);
+       err = request_irq(ndev->irq, rcar_can_interrupt, 0, ndev->name, ndev);
+       if (err) {
+               netdev_err(ndev, "error requesting interrupt %x\n", ndev->irq);
+               goto out_close;
+       }
+       can_led_event(ndev, CAN_LED_EVENT_OPEN);
+       rcar_can_start(ndev);
+       netif_start_queue(ndev);
+       return 0;
+out_close:
+       napi_disable(&priv->napi);
+       close_candev(ndev);
+out_clock:
+       clk_disable_unprepare(priv->clk);
+out:
+       return err;
+}
+
+static void rcar_can_stop(struct net_device *ndev)
+{
+       struct rcar_can_priv *priv = netdev_priv(ndev);
+       u16 ctlr;
+       int i;
+
+       /* Go to (force) reset mode */
+       ctlr = readw(&priv->regs->ctlr);
+       ctlr |= RCAR_CAN_CTLR_CANM_FORCE_RESET;
+       writew(ctlr, &priv->regs->ctlr);
+       for (i = 0; i < MAX_STR_READS; i++) {
+               if (readw(&priv->regs->str) & RCAR_CAN_STR_RSTST)
+                       break;
+       }
+       writel(0, &priv->regs->mier0);
+       writel(0, &priv->regs->mier1);
+       writeb(0, &priv->regs->ier);
+       writeb(0, &priv->regs->eier);
+       /* Go to sleep mode */
+       ctlr |= RCAR_CAN_CTLR_SLPM;
+       writew(ctlr, &priv->regs->ctlr);
+       priv->can.state = CAN_STATE_STOPPED;
+}
+
+static int rcar_can_close(struct net_device *ndev)
+{
+       struct rcar_can_priv *priv = netdev_priv(ndev);
+
+       netif_stop_queue(ndev);
+       rcar_can_stop(ndev);
+       free_irq(ndev->irq, ndev);
+       napi_disable(&priv->napi);
+       clk_disable_unprepare(priv->clk);
+       close_candev(ndev);
+       can_led_event(ndev, CAN_LED_EVENT_STOP);
+       return 0;
+}
+
+static netdev_tx_t rcar_can_start_xmit(struct sk_buff *skb,
+                                      struct net_device *ndev)
+{
+       struct rcar_can_priv *priv = netdev_priv(ndev);
+       struct can_frame *cf = (struct can_frame *)skb->data;
+       u32 data, i;
+
+       if (can_dropped_invalid_skb(ndev, skb))
+               return NETDEV_TX_OK;
+
+       if (cf->can_id & CAN_EFF_FLAG)  /* Extended frame format */
+               data = (cf->can_id & CAN_EFF_MASK) | RCAR_CAN_IDE;
+       else                            /* Standard frame format */
+               data = (cf->can_id & CAN_SFF_MASK) << RCAR_CAN_SID_SHIFT;
+
+       if (cf->can_id & CAN_RTR_FLAG) { /* Remote transmission request */
+               data |= RCAR_CAN_RTR;
+       } else {
+               for (i = 0; i < cf->can_dlc; i++)
+                       writeb(cf->data[i],
+                              &priv->regs->mb[RCAR_CAN_TX_FIFO_MBX].data[i]);
+       }
+
+       writel(data, &priv->regs->mb[RCAR_CAN_TX_FIFO_MBX].id);
+
+       writeb(cf->can_dlc, &priv->regs->mb[RCAR_CAN_TX_FIFO_MBX].dlc);
+
+       priv->tx_dlc[priv->tx_head % RCAR_CAN_FIFO_DEPTH] = cf->can_dlc;
+       can_put_echo_skb(skb, ndev, priv->tx_head % RCAR_CAN_FIFO_DEPTH);
+       priv->tx_head++;
+       /* Start Tx: write 0xff to the TFPCR register to increment
+        * the CPU-side pointer for the transmit FIFO to the next
+        * mailbox location
+        */
+       writeb(0xff, &priv->regs->tfpcr);
+       /* Stop the queue if we've filled all FIFO entries */
+       if (priv->tx_head - priv->tx_tail >= RCAR_CAN_FIFO_DEPTH)
+               netif_stop_queue(ndev);
+
+       return NETDEV_TX_OK;
+}
+
+static const struct net_device_ops rcar_can_netdev_ops = {
+       .ndo_open = rcar_can_open,
+       .ndo_stop = rcar_can_close,
+       .ndo_start_xmit = rcar_can_start_xmit,
+};
+
+static void rcar_can_rx_pkt(struct rcar_can_priv *priv)
+{
+       struct net_device_stats *stats = &priv->ndev->stats;
+       struct can_frame *cf;
+       struct sk_buff *skb;
+       u32 data;
+       u8 dlc;
+
+       skb = alloc_can_skb(priv->ndev, &cf);
+       if (!skb) {
+               stats->rx_dropped++;
+               return;
+       }
+
+       data = readl(&priv->regs->mb[RCAR_CAN_RX_FIFO_MBX].id);
+       if (data & RCAR_CAN_IDE)
+               cf->can_id = (data & CAN_EFF_MASK) | CAN_EFF_FLAG;
+       else
+               cf->can_id = (data >> RCAR_CAN_SID_SHIFT) & CAN_SFF_MASK;
+
+       dlc = readb(&priv->regs->mb[RCAR_CAN_RX_FIFO_MBX].dlc);
+       cf->can_dlc = get_can_dlc(dlc);
+       if (data & RCAR_CAN_RTR) {
+               cf->can_id |= CAN_RTR_FLAG;
+       } else {
+               for (dlc = 0; dlc < cf->can_dlc; dlc++)
+                       cf->data[dlc] =
+                       readb(&priv->regs->mb[RCAR_CAN_RX_FIFO_MBX].data[dlc]);
+       }
+
+       can_led_event(priv->ndev, CAN_LED_EVENT_RX);
+
+       stats->rx_bytes += cf->can_dlc;
+       stats->rx_packets++;
+       netif_receive_skb(skb);
+}
+
+static int rcar_can_rx_poll(struct napi_struct *napi, int quota)
+{
+       struct rcar_can_priv *priv = container_of(napi,
+                                                 struct rcar_can_priv, napi);
+       int num_pkts;
+
+       for (num_pkts = 0; num_pkts < quota; num_pkts++) {
+               u8 rfcr, isr;
+
+               isr = readb(&priv->regs->isr);
+               /* Clear interrupt bit */
+               if (isr & RCAR_CAN_ISR_RXFF)
+                       writeb(isr & ~RCAR_CAN_ISR_RXFF, &priv->regs->isr);
+               rfcr = readb(&priv->regs->rfcr);
+               if (rfcr & RCAR_CAN_RFCR_RFEST)
+                       break;
+               rcar_can_rx_pkt(priv);
+               /* Write 0xff to the RFPCR register to increment
+                * the CPU-side pointer for the receive FIFO
+                * to the next mailbox location
+                */
+               writeb(0xff, &priv->regs->rfpcr);
+       }
+       /* All packets processed */
+       if (num_pkts < quota) {
+               napi_complete(napi);
+               priv->ier |= RCAR_CAN_IER_RXFIE;
+               writeb(priv->ier, &priv->regs->ier);
+       }
+       return num_pkts;
+}
+
+static int rcar_can_do_set_mode(struct net_device *ndev, enum can_mode mode)
+{
+       switch (mode) {
+       case CAN_MODE_START:
+               rcar_can_start(ndev);
+               netif_wake_queue(ndev);
+               return 0;
+       default:
+               return -EOPNOTSUPP;
+       }
+}
+
+static int rcar_can_get_berr_counter(const struct net_device *dev,
+                                    struct can_berr_counter *bec)
+{
+       struct rcar_can_priv *priv = netdev_priv(dev);
+       int err;
+
+       err = clk_prepare_enable(priv->clk);
+       if (err)
+               return err;
+       bec->txerr = readb(&priv->regs->tecr);
+       bec->rxerr = readb(&priv->regs->recr);
+       clk_disable_unprepare(priv->clk);
+       return 0;
+}
+
+static int rcar_can_probe(struct platform_device *pdev)
+{
+       struct rcar_can_platform_data *pdata;
+       struct rcar_can_priv *priv;
+       struct net_device *ndev;
+       struct resource *mem;
+       void __iomem *addr;
+       int err = -ENODEV;
+       int irq;
+
+       pdata = dev_get_platdata(&pdev->dev);
+       if (!pdata) {
+               dev_err(&pdev->dev, "No platform data provided!\n");
+               goto fail;
+       }
+
+       irq = platform_get_irq(pdev, 0);
+       if (!irq) {
+               dev_err(&pdev->dev, "No IRQ resource\n");
+               goto fail;
+       }
+
+       mem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+       addr = devm_ioremap_resource(&pdev->dev, mem);
+       if (IS_ERR(addr)) {
+               err = PTR_ERR(addr);
+               goto fail;
+       }
+
+       ndev = alloc_candev(sizeof(struct rcar_can_priv), RCAR_CAN_FIFO_DEPTH);
+       if (!ndev) {
+               dev_err(&pdev->dev, "alloc_candev() failed\n");
+               err = -ENOMEM;
+               goto fail;
+       }
+
+       priv = netdev_priv(ndev);
+
+       priv->clk = devm_clk_get(&pdev->dev, NULL);
+       if (IS_ERR(priv->clk)) {
+               err = PTR_ERR(priv->clk);
+               dev_err(&pdev->dev, "cannot get clock: %d\n", err);
+               goto fail_clk;
+       }
+
+       ndev->netdev_ops = &rcar_can_netdev_ops;
+       ndev->irq = irq;
+       ndev->flags |= IFF_ECHO;
+       priv->ndev = ndev;
+       priv->regs = addr;
+       priv->clock_select = pdata->clock_select;
+       priv->can.clock.freq = clk_get_rate(priv->clk);
+       priv->can.bittiming_const = &rcar_can_bittiming_const;
+       priv->can.do_set_mode = rcar_can_do_set_mode;
+       priv->can.do_get_berr_counter = rcar_can_get_berr_counter;
+       priv->can.ctrlmode_supported = CAN_CTRLMODE_BERR_REPORTING;
+       platform_set_drvdata(pdev, ndev);
+       SET_NETDEV_DEV(ndev, &pdev->dev);
+
+       netif_napi_add(ndev, &priv->napi, rcar_can_rx_poll,
+                      RCAR_CAN_NAPI_WEIGHT);
+       err = register_candev(ndev);
+       if (err) {
+               dev_err(&pdev->dev, "register_candev() failed, error %d\n",
+                       err);
+               goto fail_candev;
+       }
+
+       devm_can_led_init(ndev);
+
+       dev_info(&pdev->dev, "device registered (reg_base=%p, irq=%u)\n",
+                priv->regs, ndev->irq);
+
+       return 0;
+fail_candev:
+       netif_napi_del(&priv->napi);
+fail_clk:
+       free_candev(ndev);
+fail:
+       return err;
+}
+
+static int rcar_can_remove(struct platform_device *pdev)
+{
+       struct net_device *ndev = platform_get_drvdata(pdev);
+       struct rcar_can_priv *priv = netdev_priv(ndev);
+
+       unregister_candev(ndev);
+       netif_napi_del(&priv->napi);
+       free_candev(ndev);
+       return 0;
+}
+
+static int __maybe_unused rcar_can_suspend(struct device *dev)
+{
+       struct net_device *ndev = dev_get_drvdata(dev);
+       struct rcar_can_priv *priv = netdev_priv(ndev);
+       u16 ctlr;
+
+       if (netif_running(ndev)) {
+               netif_stop_queue(ndev);
+               netif_device_detach(ndev);
+       }
+       ctlr = readw(&priv->regs->ctlr);
+       ctlr |= RCAR_CAN_CTLR_CANM_HALT;
+       writew(ctlr, &priv->regs->ctlr);
+       ctlr |= RCAR_CAN_CTLR_SLPM;
+       writew(ctlr, &priv->regs->ctlr);
+       priv->can.state = CAN_STATE_SLEEPING;
+
+       clk_disable(priv->clk);
+       return 0;
+}
+
+static int __maybe_unused rcar_can_resume(struct device *dev)
+{
+       struct net_device *ndev = dev_get_drvdata(dev);
+       struct rcar_can_priv *priv = netdev_priv(ndev);
+       u16 ctlr;
+       int err;
+
+       err = clk_enable(priv->clk);
+       if (err) {
+               netdev_err(ndev, "clk_enable() failed, error %d\n", err);
+               return err;
+       }
+
+       ctlr = readw(&priv->regs->ctlr);
+       ctlr &= ~RCAR_CAN_CTLR_SLPM;
+       writew(ctlr, &priv->regs->ctlr);
+       ctlr &= ~RCAR_CAN_CTLR_CANM;
+       writew(ctlr, &priv->regs->ctlr);
+       priv->can.state = CAN_STATE_ERROR_ACTIVE;
+
+       if (netif_running(ndev)) {
+               netif_device_attach(ndev);
+               netif_start_queue(ndev);
+       }
+       return 0;
+}
+
+static SIMPLE_DEV_PM_OPS(rcar_can_pm_ops, rcar_can_suspend, rcar_can_resume);
+
+static struct platform_driver rcar_can_driver = {
+       .driver = {
+               .name = RCAR_CAN_DRV_NAME,
+               .owner = THIS_MODULE,
+               .pm = &rcar_can_pm_ops,
+       },
+       .probe = rcar_can_probe,
+       .remove = rcar_can_remove,
+};
+
+module_platform_driver(rcar_can_driver);
+
+MODULE_AUTHOR("Cogent Embedded, Inc.");
+MODULE_LICENSE("GPL");
+MODULE_DESCRIPTION("CAN driver for Renesas R-Car SoC");
+MODULE_ALIAS("platform:" RCAR_CAN_DRV_NAME);
index df136a2516c401a5d96aba3d7c0da2e9f511d1a8..014695d7e6a342c49e9c86ed66f59ba5cddc0335 100644 (file)
@@ -46,6 +46,7 @@ static int clk[MAXDEV];
 static unsigned char cdr[MAXDEV] = {[0 ... (MAXDEV - 1)] = 0xff};
 static unsigned char ocr[MAXDEV] = {[0 ... (MAXDEV - 1)] = 0xff};
 static int indirect[MAXDEV] = {[0 ... (MAXDEV - 1)] = -1};
+static spinlock_t indirect_lock[MAXDEV];  /* lock for indirect access mode */
 
 module_param_array(port, ulong, NULL, S_IRUGO);
 MODULE_PARM_DESC(port, "I/O port number");
@@ -101,19 +102,26 @@ static void sja1000_isa_port_write_reg(const struct sja1000_priv *priv,
 static u8 sja1000_isa_port_read_reg_indirect(const struct sja1000_priv *priv,
                                             int reg)
 {
-       unsigned long base = (unsigned long)priv->reg_base;
+       unsigned long flags, base = (unsigned long)priv->reg_base;
+       u8 readval;
 
+       spin_lock_irqsave(&indirect_lock[priv->dev->dev_id], flags);
        outb(reg, base);
-       return inb(base + 1);
+       readval = inb(base + 1);
+       spin_unlock_irqrestore(&indirect_lock[priv->dev->dev_id], flags);
+
+       return readval;
 }
 
 static void sja1000_isa_port_write_reg_indirect(const struct sja1000_priv *priv,
                                                int reg, u8 val)
 {
-       unsigned long base = (unsigned long)priv->reg_base;
+       unsigned long flags, base = (unsigned long)priv->reg_base;
 
+       spin_lock_irqsave(&indirect_lock[priv->dev->dev_id], flags);
        outb(reg, base);
        outb(val, base + 1);
+       spin_unlock_irqrestore(&indirect_lock[priv->dev->dev_id], flags);
 }
 
 static int sja1000_isa_probe(struct platform_device *pdev)
@@ -169,6 +177,7 @@ static int sja1000_isa_probe(struct platform_device *pdev)
                if (iosize == SJA1000_IOSIZE_INDIRECT) {
                        priv->read_reg = sja1000_isa_port_read_reg_indirect;
                        priv->write_reg = sja1000_isa_port_write_reg_indirect;
+                       spin_lock_init(&indirect_lock[idx]);
                } else {
                        priv->read_reg = sja1000_isa_port_read_reg;
                        priv->write_reg = sja1000_isa_port_write_reg;
@@ -198,6 +207,7 @@ static int sja1000_isa_probe(struct platform_device *pdev)
 
        platform_set_drvdata(pdev, dev);
        SET_NETDEV_DEV(dev, &pdev->dev);
+       dev->dev_id = idx;
 
        err = register_sja1000dev(dev);
        if (err) {
index f5b16e0e3a125f4e38a93408e92b8218808339af..dcf9196f63164b0db099e17a8d1208d9ac158ac4 100644 (file)
@@ -322,13 +322,13 @@ static void slcan_write_wakeup(struct tty_struct *tty)
        if (!sl || sl->magic != SLCAN_MAGIC || !netif_running(sl->dev))
                return;
 
-       spin_lock(&sl->lock);
+       spin_lock_bh(&sl->lock);
        if (sl->xleft <= 0)  {
                /* Now serial buffer is almost free & we can start
                 * transmission of another packet */
                sl->dev->stats.tx_packets++;
                clear_bit(TTY_DO_WRITE_WAKEUP, &tty->flags);
-               spin_unlock(&sl->lock);
+               spin_unlock_bh(&sl->lock);
                netif_wake_queue(sl->dev);
                return;
        }
@@ -336,7 +336,7 @@ static void slcan_write_wakeup(struct tty_struct *tty)
        actual = tty->ops->write(tty, sl->xhead, sl->xleft);
        sl->xleft -= actual;
        sl->xhead += actual;
-       spin_unlock(&sl->lock);
+       spin_unlock_bh(&sl->lock);
 }
 
 /* Send a can_frame to a TTY queue. */
index 7d8c8f3672dd993119a28f478e5cc43514aa131c..bacd236ce3064d357271ff67e3ccacbfedddd953 100644 (file)
@@ -556,15 +556,6 @@ failed:
 /*
  * netdev sysfs
  */
-static ssize_t show_channel(struct device *dev, struct device_attribute *attr,
-               char *buf)
-{
-       struct net_device *ndev = to_net_dev(dev);
-       struct softing_priv *priv = netdev2softing(ndev);
-
-       return sprintf(buf, "%i\n", priv->index);
-}
-
 static ssize_t show_chip(struct device *dev, struct device_attribute *attr,
                char *buf)
 {
@@ -609,12 +600,10 @@ static ssize_t store_output(struct device *dev, struct device_attribute *attr,
        return count;
 }
 
-static const DEVICE_ATTR(channel, S_IRUGO, show_channel, NULL);
 static const DEVICE_ATTR(chip, S_IRUGO, show_chip, NULL);
 static const DEVICE_ATTR(output, S_IRUGO | S_IWUSR, show_output, store_output);
 
 static const struct attribute *const netdev_sysfs_attrs[] = {
-       &dev_attr_channel.attr,
        &dev_attr_chip.attr,
        &dev_attr_output.attr,
        NULL,
@@ -679,17 +668,20 @@ static int softing_netdev_register(struct net_device *netdev)
 {
        int ret;
 
-       netdev->sysfs_groups[0] = &netdev_sysfs_group;
        ret = register_candev(netdev);
        if (ret) {
                dev_alert(&netdev->dev, "register failed\n");
                return ret;
        }
+       if (sysfs_create_group(&netdev->dev.kobj, &netdev_sysfs_group) < 0)
+               netdev_alert(netdev, "sysfs group failed\n");
+
        return 0;
 }
 
 static void softing_netdev_cleanup(struct net_device *netdev)
 {
+       sysfs_remove_group(&netdev->dev.kobj, &netdev_sysfs_group);
        unregister_candev(netdev);
        free_candev(netdev);
 }
@@ -721,8 +713,6 @@ DEV_ATTR_RO(firmware_version, id.fw_version);
 DEV_ATTR_RO_STR(hardware, pdat->name);
 DEV_ATTR_RO(hardware_version, id.hw_version);
 DEV_ATTR_RO(license, id.license);
-DEV_ATTR_RO(frequency, id.freq);
-DEV_ATTR_RO(txpending, tx.pending);
 
 static struct attribute *softing_pdev_attrs[] = {
        &dev_attr_serial.attr,
@@ -731,8 +721,6 @@ static struct attribute *softing_pdev_attrs[] = {
        &dev_attr_hardware.attr,
        &dev_attr_hardware_version.attr,
        &dev_attr_license.attr,
-       &dev_attr_frequency.attr,
-       &dev_attr_txpending.attr,
        NULL,
 };
 
diff --git a/drivers/net/can/spi/Kconfig b/drivers/net/can/spi/Kconfig
new file mode 100644 (file)
index 0000000..148cae5
--- /dev/null
@@ -0,0 +1,10 @@
+menu "CAN SPI interfaces"
+       depends on SPI
+
+config CAN_MCP251X
+       tristate "Microchip MCP251x SPI CAN controllers"
+       depends on HAS_DMA
+       ---help---
+         Driver for the Microchip MCP251x SPI CAN controllers.
+
+endmenu
diff --git a/drivers/net/can/spi/Makefile b/drivers/net/can/spi/Makefile
new file mode 100644 (file)
index 0000000..90bcacf
--- /dev/null
@@ -0,0 +1,8 @@
+#
+#  Makefile for the Linux Controller Area Network SPI drivers.
+#
+
+
+obj-$(CONFIG_CAN_MCP251X)      += mcp251x.o
+
+ccflags-$(CONFIG_CAN_DEBUG_DEVICES) := -DDEBUG
diff --git a/drivers/net/can/spi/mcp251x.c b/drivers/net/can/spi/mcp251x.c
new file mode 100644 (file)
index 0000000..5df239e
--- /dev/null
@@ -0,0 +1,1266 @@
+/*
+ * CAN bus driver for Microchip 251x CAN Controller with SPI Interface
+ *
+ * MCP2510 support and bug fixes by Christian Pellegrin
+ * <chripell@evolware.org>
+ *
+ * Copyright 2009 Christian Pellegrin EVOL S.r.l.
+ *
+ * Copyright 2007 Raymarine UK, Ltd. All Rights Reserved.
+ * Written under contract by:
+ *   Chris Elston, Katalix Systems, Ltd.
+ *
+ * Based on Microchip MCP251x CAN controller driver written by
+ * David Vrabel, Copyright 2006 Arcom Control Systems Ltd.
+ *
+ * Based on CAN bus driver for the CCAN controller written by
+ * - Sascha Hauer, Marc Kleine-Budde, Pengutronix
+ * - Simon Kallweit, intefo AG
+ * Copyright 2007
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the version 2 of the GNU General Public License
+ * as published by the Free Software Foundation
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, see <http://www.gnu.org/licenses/>.
+ *
+ *
+ *
+ * Your platform definition file should specify something like:
+ *
+ * static struct mcp251x_platform_data mcp251x_info = {
+ *         .oscillator_frequency = 8000000,
+ * };
+ *
+ * static struct spi_board_info spi_board_info[] = {
+ *         {
+ *                 .modalias = "mcp2510",
+ *                     // or "mcp2515" depending on your controller
+ *                 .platform_data = &mcp251x_info,
+ *                 .irq = IRQ_EINT13,
+ *                 .max_speed_hz = 2*1000*1000,
+ *                 .chip_select = 2,
+ *         },
+ * };
+ *
+ * Please see mcp251x.h for a description of the fields in
+ * struct mcp251x_platform_data.
+ *
+ */
+
+#include <linux/can/core.h>
+#include <linux/can/dev.h>
+#include <linux/can/led.h>
+#include <linux/can/platform/mcp251x.h>
+#include <linux/clk.h>
+#include <linux/completion.h>
+#include <linux/delay.h>
+#include <linux/device.h>
+#include <linux/dma-mapping.h>
+#include <linux/freezer.h>
+#include <linux/interrupt.h>
+#include <linux/io.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/netdevice.h>
+#include <linux/of.h>
+#include <linux/of_device.h>
+#include <linux/platform_device.h>
+#include <linux/slab.h>
+#include <linux/spi/spi.h>
+#include <linux/uaccess.h>
+#include <linux/regulator/consumer.h>
+
+/* SPI interface instruction set */
+#define INSTRUCTION_WRITE      0x02
+#define INSTRUCTION_READ       0x03
+#define INSTRUCTION_BIT_MODIFY 0x05
+#define INSTRUCTION_LOAD_TXB(n)        (0x40 + 2 * (n))
+#define INSTRUCTION_READ_RXB(n)        (((n) == 0) ? 0x90 : 0x94)
+#define INSTRUCTION_RESET      0xC0
+#define RTS_TXB0               0x01
+#define RTS_TXB1               0x02
+#define RTS_TXB2               0x04
+#define INSTRUCTION_RTS(n)     (0x80 | ((n) & 0x07))
+
+
+/* MPC251x registers */
+#define CANSTAT              0x0e
+#define CANCTRL              0x0f
+#  define CANCTRL_REQOP_MASK       0xe0
+#  define CANCTRL_REQOP_CONF       0x80
+#  define CANCTRL_REQOP_LISTEN_ONLY 0x60
+#  define CANCTRL_REQOP_LOOPBACK    0x40
+#  define CANCTRL_REQOP_SLEEP      0x20
+#  define CANCTRL_REQOP_NORMAL     0x00
+#  define CANCTRL_OSM              0x08
+#  define CANCTRL_ABAT             0x10
+#define TEC          0x1c
+#define REC          0x1d
+#define CNF1         0x2a
+#  define CNF1_SJW_SHIFT   6
+#define CNF2         0x29
+#  define CNF2_BTLMODE    0x80
+#  define CNF2_SAM         0x40
+#  define CNF2_PS1_SHIFT   3
+#define CNF3         0x28
+#  define CNF3_SOF        0x08
+#  define CNF3_WAKFIL     0x04
+#  define CNF3_PHSEG2_MASK 0x07
+#define CANINTE              0x2b
+#  define CANINTE_MERRE 0x80
+#  define CANINTE_WAKIE 0x40
+#  define CANINTE_ERRIE 0x20
+#  define CANINTE_TX2IE 0x10
+#  define CANINTE_TX1IE 0x08
+#  define CANINTE_TX0IE 0x04
+#  define CANINTE_RX1IE 0x02
+#  define CANINTE_RX0IE 0x01
+#define CANINTF              0x2c
+#  define CANINTF_MERRF 0x80
+#  define CANINTF_WAKIF 0x40
+#  define CANINTF_ERRIF 0x20
+#  define CANINTF_TX2IF 0x10
+#  define CANINTF_TX1IF 0x08
+#  define CANINTF_TX0IF 0x04
+#  define CANINTF_RX1IF 0x02
+#  define CANINTF_RX0IF 0x01
+#  define CANINTF_RX (CANINTF_RX0IF | CANINTF_RX1IF)
+#  define CANINTF_TX (CANINTF_TX2IF | CANINTF_TX1IF | CANINTF_TX0IF)
+#  define CANINTF_ERR (CANINTF_ERRIF)
+#define EFLG         0x2d
+#  define EFLG_EWARN   0x01
+#  define EFLG_RXWAR   0x02
+#  define EFLG_TXWAR   0x04
+#  define EFLG_RXEP    0x08
+#  define EFLG_TXEP    0x10
+#  define EFLG_TXBO    0x20
+#  define EFLG_RX0OVR  0x40
+#  define EFLG_RX1OVR  0x80
+#define TXBCTRL(n)  (((n) * 0x10) + 0x30 + TXBCTRL_OFF)
+#  define TXBCTRL_ABTF 0x40
+#  define TXBCTRL_MLOA 0x20
+#  define TXBCTRL_TXERR 0x10
+#  define TXBCTRL_TXREQ 0x08
+#define TXBSIDH(n)  (((n) * 0x10) + 0x30 + TXBSIDH_OFF)
+#  define SIDH_SHIFT    3
+#define TXBSIDL(n)  (((n) * 0x10) + 0x30 + TXBSIDL_OFF)
+#  define SIDL_SID_MASK    7
+#  define SIDL_SID_SHIFT   5
+#  define SIDL_EXIDE_SHIFT 3
+#  define SIDL_EID_SHIFT   16
+#  define SIDL_EID_MASK    3
+#define TXBEID8(n)  (((n) * 0x10) + 0x30 + TXBEID8_OFF)
+#define TXBEID0(n)  (((n) * 0x10) + 0x30 + TXBEID0_OFF)
+#define TXBDLC(n)   (((n) * 0x10) + 0x30 + TXBDLC_OFF)
+#  define DLC_RTR_SHIFT    6
+#define TXBCTRL_OFF 0
+#define TXBSIDH_OFF 1
+#define TXBSIDL_OFF 2
+#define TXBEID8_OFF 3
+#define TXBEID0_OFF 4
+#define TXBDLC_OFF  5
+#define TXBDAT_OFF  6
+#define RXBCTRL(n)  (((n) * 0x10) + 0x60 + RXBCTRL_OFF)
+#  define RXBCTRL_BUKT 0x04
+#  define RXBCTRL_RXM0 0x20
+#  define RXBCTRL_RXM1 0x40
+#define RXBSIDH(n)  (((n) * 0x10) + 0x60 + RXBSIDH_OFF)
+#  define RXBSIDH_SHIFT 3
+#define RXBSIDL(n)  (((n) * 0x10) + 0x60 + RXBSIDL_OFF)
+#  define RXBSIDL_IDE   0x08
+#  define RXBSIDL_SRR   0x10
+#  define RXBSIDL_EID   3
+#  define RXBSIDL_SHIFT 5
+#define RXBEID8(n)  (((n) * 0x10) + 0x60 + RXBEID8_OFF)
+#define RXBEID0(n)  (((n) * 0x10) + 0x60 + RXBEID0_OFF)
+#define RXBDLC(n)   (((n) * 0x10) + 0x60 + RXBDLC_OFF)
+#  define RXBDLC_LEN_MASK  0x0f
+#  define RXBDLC_RTR       0x40
+#define RXBCTRL_OFF 0
+#define RXBSIDH_OFF 1
+#define RXBSIDL_OFF 2
+#define RXBEID8_OFF 3
+#define RXBEID0_OFF 4
+#define RXBDLC_OFF  5
+#define RXBDAT_OFF  6
+#define RXFSIDH(n) ((n) * 4)
+#define RXFSIDL(n) ((n) * 4 + 1)
+#define RXFEID8(n) ((n) * 4 + 2)
+#define RXFEID0(n) ((n) * 4 + 3)
+#define RXMSIDH(n) ((n) * 4 + 0x20)
+#define RXMSIDL(n) ((n) * 4 + 0x21)
+#define RXMEID8(n) ((n) * 4 + 0x22)
+#define RXMEID0(n) ((n) * 4 + 0x23)
+
+#define GET_BYTE(val, byte)                    \
+       (((val) >> ((byte) * 8)) & 0xff)
+#define SET_BYTE(val, byte)                    \
+       (((val) & 0xff) << ((byte) * 8))
+
+/*
+ * Buffer size required for the largest SPI transfer (i.e., reading a
+ * frame)
+ */
+#define CAN_FRAME_MAX_DATA_LEN 8
+#define SPI_TRANSFER_BUF_LEN   (6 + CAN_FRAME_MAX_DATA_LEN)
+#define CAN_FRAME_MAX_BITS     128
+
+#define TX_ECHO_SKB_MAX        1
+
+#define MCP251X_OST_DELAY_MS   (5)
+
+#define DEVICE_NAME "mcp251x"
+
+static int mcp251x_enable_dma; /* Enable SPI DMA. Default: 0 (Off) */
+module_param(mcp251x_enable_dma, int, S_IRUGO);
+MODULE_PARM_DESC(mcp251x_enable_dma, "Enable SPI DMA. Default: 0 (Off)");
+
+/* Bit timing limits of the MCP251x family; the CAN core uses these to
+ * compute the CNF1-CNF3 values programmed in mcp251x_do_set_bittiming().
+ */
+static const struct can_bittiming_const mcp251x_bittiming_const = {
+       .name = DEVICE_NAME,
+       .tseg1_min = 3,
+       .tseg1_max = 16,
+       .tseg2_min = 2,
+       .tseg2_max = 8,
+       .sjw_max = 4,
+       .brp_min = 1,
+       .brp_max = 64,
+       .brp_inc = 1,
+};
+
+/* Supported chip models, encoded as their part numbers. */
+enum mcp251x_model {
+       CAN_MCP251X_MCP2510     = 0x2510,
+       CAN_MCP251X_MCP2515     = 0x2515,
+};
+
+/* Per-chip driver state; 'can' must stay first (CAN core convention). */
+struct mcp251x_priv {
+       struct can_priv    can;
+       struct net_device *net;
+       struct spi_device *spi;
+       enum mcp251x_model model;
+
+       struct mutex mcp_lock; /* SPI device lock */
+
+       /* DMA-safe SPI transfer buffers, plus their bus addresses when
+        * DMA is enabled via the mcp251x_enable_dma module parameter.
+        */
+       u8 *spi_tx_buf;
+       u8 *spi_rx_buf;
+       dma_addr_t spi_tx_dma;
+       dma_addr_t spi_rx_dma;
+
+       /* frame queued for transmission, and 1 + dlc of the frame in
+        * flight (0 when idle) -- used for tx_bytes accounting
+        */
+       struct sk_buff *tx_skb;
+       int tx_len;
+
+       struct workqueue_struct *wq;
+       struct work_struct tx_work;
+       struct work_struct restart_work;
+
+       int force_quit;         /* tells the IRQ thread to stop looping */
+       int after_suspend;      /* bitmask of AFTER_SUSPEND_* actions */
+#define AFTER_SUSPEND_UP 1
+#define AFTER_SUSPEND_DOWN 2
+#define AFTER_SUSPEND_POWER 4
+#define AFTER_SUSPEND_RESTART 8
+       int restart_tx;         /* restart_work: redo TX path after bus-off */
+       struct regulator *power;
+       struct regulator *transceiver;
+       struct clk *clk;
+};
+
+/* Generate the mcp251x_is_2510()/mcp251x_is_2515() model predicates. */
+#define MCP251X_IS(_model) \
+static inline int mcp251x_is_##_model(struct spi_device *spi) \
+{ \
+       struct mcp251x_priv *priv = spi_get_drvdata(spi); \
+       return priv->model == CAN_MCP251X_MCP##_model; \
+}
+
+MCP251X_IS(2510);
+MCP251X_IS(2515);
+
+/* Discard any transmission in progress: account the lost frame as a tx
+ * error, free the queued skb and/or the loopback echo skb, and reset
+ * the tx bookkeeping to idle.
+ */
+static void mcp251x_clean(struct net_device *net)
+{
+       struct mcp251x_priv *priv = netdev_priv(net);
+
+       if (priv->tx_skb || priv->tx_len)
+               net->stats.tx_errors++;
+       if (priv->tx_skb)
+               dev_kfree_skb(priv->tx_skb);
+       if (priv->tx_len)
+               can_free_echo_skb(priv->net, 0);
+       priv->tx_skb = NULL;
+       priv->tx_len = 0;
+}
+
+/*
+ * Note about handling of error return of mcp251x_spi_trans: accessing
+ * registers via SPI is not really different conceptually than using
+ * normal I/O assembler instructions, although it's much more
+ * complicated from a practical POV. So it's not advisable to always
+ * check the return value of this function. Imagine that every
+ * read{b,l}, write{b,l} and friends would be bracketed in "if ( < 0)
+ * error();", it would be a great mess (well there are some situation
+ * when exception handling C++ like could be useful after all). So we
+ * just check that transfers are OK at the beginning of our
+ * conversation with the chip and to avoid doing really nasty things
+ * (like injecting bogus packets in the network stack).
+ */
+/* Run one full-duplex SPI transfer of @len bytes between spi_tx_buf and
+ * spi_rx_buf (pre-mapped DMA addresses are used when mcp251x_enable_dma
+ * is set). Returns 0 or the spi_sync() error; see the note above on why
+ * most callers deliberately ignore the return value.
+ */
+static int mcp251x_spi_trans(struct spi_device *spi, int len)
+{
+       struct mcp251x_priv *priv = spi_get_drvdata(spi);
+       struct spi_transfer t = {
+               .tx_buf = priv->spi_tx_buf,
+               .rx_buf = priv->spi_rx_buf,
+               .len = len,
+               .cs_change = 0,
+       };
+       struct spi_message m;
+       int ret;
+
+       spi_message_init(&m);
+
+       if (mcp251x_enable_dma) {
+               t.tx_dma = priv->spi_tx_dma;
+               t.rx_dma = priv->spi_rx_dma;
+               m.is_dma_mapped = 1;
+       }
+
+       spi_message_add_tail(&t, &m);
+
+       ret = spi_sync(spi, &m);
+       if (ret)
+               dev_err(&spi->dev, "spi transfer failed: ret = %d\n", ret);
+       return ret;
+}
+
+/* Read a single register: READ instruction + address, the value is
+ * clocked back in the third byte of the transfer.
+ */
+static u8 mcp251x_read_reg(struct spi_device *spi, uint8_t reg)
+{
+       struct mcp251x_priv *priv = spi_get_drvdata(spi);
+       u8 val = 0;
+
+       priv->spi_tx_buf[0] = INSTRUCTION_READ;
+       priv->spi_tx_buf[1] = reg;
+
+       mcp251x_spi_trans(spi, 3);
+       val = priv->spi_rx_buf[2];
+
+       return val;
+}
+
+/* Read two consecutive registers (@reg and @reg + 1) in one transfer;
+ * used by the IRQ thread to fetch CANINTF and EFLG together.
+ */
+static void mcp251x_read_2regs(struct spi_device *spi, uint8_t reg,
+               uint8_t *v1, uint8_t *v2)
+{
+       struct mcp251x_priv *priv = spi_get_drvdata(spi);
+
+       priv->spi_tx_buf[0] = INSTRUCTION_READ;
+       priv->spi_tx_buf[1] = reg;
+
+       mcp251x_spi_trans(spi, 4);
+
+       *v1 = priv->spi_rx_buf[2];
+       *v2 = priv->spi_rx_buf[3];
+}
+
+/* Write a single register via the WRITE instruction. */
+static void mcp251x_write_reg(struct spi_device *spi, u8 reg, uint8_t val)
+{
+       struct mcp251x_priv *priv = spi_get_drvdata(spi);
+
+       priv->spi_tx_buf[0] = INSTRUCTION_WRITE;
+       priv->spi_tx_buf[1] = reg;
+       priv->spi_tx_buf[2] = val;
+
+       mcp251x_spi_trans(spi, 3);
+}
+
+/* Atomic read-modify-write via the BIT MODIFY instruction: only the
+ * bits set in @mask take the corresponding value from @val, all other
+ * bits of @reg are left untouched.
+ */
+static void mcp251x_write_bits(struct spi_device *spi, u8 reg,
+                              u8 mask, uint8_t val)
+{
+       struct mcp251x_priv *priv = spi_get_drvdata(spi);
+
+       priv->spi_tx_buf[0] = INSTRUCTION_BIT_MODIFY;
+       priv->spi_tx_buf[1] = reg;
+       priv->spi_tx_buf[2] = mask;
+       priv->spi_tx_buf[3] = val;
+
+       mcp251x_spi_trans(spi, 4);
+}
+
+/* Copy a prepared transmit-buffer image (@buf: header + @len data
+ * bytes) into TX buffer @tx_buf_idx. The MCP2510 is written one
+ * register at a time; newer chips take the whole LOAD TXB burst
+ * already assembled in @buf in a single transfer.
+ */
+static void mcp251x_hw_tx_frame(struct spi_device *spi, u8 *buf,
+                               int len, int tx_buf_idx)
+{
+       struct mcp251x_priv *priv = spi_get_drvdata(spi);
+
+       if (mcp251x_is_2510(spi)) {
+               int i;
+
+               for (i = 1; i < TXBDAT_OFF + len; i++)
+                       mcp251x_write_reg(spi, TXBCTRL(tx_buf_idx) + i,
+                                         buf[i]);
+       } else {
+               memcpy(priv->spi_tx_buf, buf, TXBDAT_OFF + len);
+               mcp251x_spi_trans(spi, TXBDAT_OFF + len);
+       }
+}
+
+/* Serialize @frame (standard or extended ID, RTR flag, DLC, data) into
+ * the chip's TXB register layout, load it into TX buffer @tx_buf_idx
+ * and start the transmission with a separate RTS instruction.
+ */
+static void mcp251x_hw_tx(struct spi_device *spi, struct can_frame *frame,
+                         int tx_buf_idx)
+{
+       struct mcp251x_priv *priv = spi_get_drvdata(spi);
+       u32 sid, eid, exide, rtr;
+       u8 buf[SPI_TRANSFER_BUF_LEN];
+
+       exide = (frame->can_id & CAN_EFF_FLAG) ? 1 : 0; /* Extended ID Enable */
+       if (exide)
+               sid = (frame->can_id & CAN_EFF_MASK) >> 18;
+       else
+               sid = frame->can_id & CAN_SFF_MASK; /* Standard ID */
+       eid = frame->can_id & CAN_EFF_MASK; /* Extended ID */
+       rtr = (frame->can_id & CAN_RTR_FLAG) ? 1 : 0; /* Remote transmission */
+
+       buf[TXBCTRL_OFF] = INSTRUCTION_LOAD_TXB(tx_buf_idx);
+       buf[TXBSIDH_OFF] = sid >> SIDH_SHIFT;
+       buf[TXBSIDL_OFF] = ((sid & SIDL_SID_MASK) << SIDL_SID_SHIFT) |
+               (exide << SIDL_EXIDE_SHIFT) |
+               ((eid >> SIDL_EID_SHIFT) & SIDL_EID_MASK);
+       buf[TXBEID8_OFF] = GET_BYTE(eid, 1);
+       buf[TXBEID0_OFF] = GET_BYTE(eid, 0);
+       buf[TXBDLC_OFF] = (rtr << DLC_RTR_SHIFT) | frame->can_dlc;
+       memcpy(buf + TXBDAT_OFF, frame->data, frame->can_dlc);
+       mcp251x_hw_tx_frame(spi, buf, frame->can_dlc, tx_buf_idx);
+
+       /* use INSTRUCTION_RTS, to avoid "repeated frame problem" */
+       priv->spi_tx_buf[0] = INSTRUCTION_RTS(1 << tx_buf_idx);
+       mcp251x_spi_trans(priv->spi, 1);
+}
+
+/* Read the raw content of RX buffer @buf_idx into @buf. On the MCP2510
+ * the registers are read one by one (only as many data bytes as the
+ * received DLC announces); newer chips support the READ RXB burst
+ * instruction, which also clears the RX interrupt flag automatically.
+ */
+static void mcp251x_hw_rx_frame(struct spi_device *spi, u8 *buf,
+                               int buf_idx)
+{
+       struct mcp251x_priv *priv = spi_get_drvdata(spi);
+
+       if (mcp251x_is_2510(spi)) {
+               int i, len;
+
+               for (i = 1; i < RXBDAT_OFF; i++)
+                       buf[i] = mcp251x_read_reg(spi, RXBCTRL(buf_idx) + i);
+
+               len = get_can_dlc(buf[RXBDLC_OFF] & RXBDLC_LEN_MASK);
+               for (; i < (RXBDAT_OFF + len); i++)
+                       buf[i] = mcp251x_read_reg(spi, RXBCTRL(buf_idx) + i);
+       } else {
+               priv->spi_tx_buf[RXBCTRL_OFF] = INSTRUCTION_READ_RXB(buf_idx);
+               mcp251x_spi_trans(spi, SPI_TRANSFER_BUF_LEN);
+               memcpy(buf, priv->spi_rx_buf, SPI_TRANSFER_BUF_LEN);
+       }
+}
+
+/* Fetch one received frame from RX buffer @buf_idx, decode the ID
+ * registers into a struct can_frame and hand it to the network stack.
+ * On skb allocation failure the frame is dropped and counted.
+ */
+static void mcp251x_hw_rx(struct spi_device *spi, int buf_idx)
+{
+       struct mcp251x_priv *priv = spi_get_drvdata(spi);
+       struct sk_buff *skb;
+       struct can_frame *frame;
+       u8 buf[SPI_TRANSFER_BUF_LEN];
+
+       skb = alloc_can_skb(priv->net, &frame);
+       if (!skb) {
+               dev_err(&spi->dev, "cannot allocate RX skb\n");
+               priv->net->stats.rx_dropped++;
+               return;
+       }
+
+       mcp251x_hw_rx_frame(spi, buf, buf_idx);
+       if (buf[RXBSIDL_OFF] & RXBSIDL_IDE) {
+               /* Extended ID format */
+               frame->can_id = CAN_EFF_FLAG;
+               frame->can_id |=
+                       /* Extended ID part */
+                       SET_BYTE(buf[RXBSIDL_OFF] & RXBSIDL_EID, 2) |
+                       SET_BYTE(buf[RXBEID8_OFF], 1) |
+                       SET_BYTE(buf[RXBEID0_OFF], 0) |
+                       /* Standard ID part */
+                       (((buf[RXBSIDH_OFF] << RXBSIDH_SHIFT) |
+                         (buf[RXBSIDL_OFF] >> RXBSIDL_SHIFT)) << 18);
+               /* Remote transmission request */
+               if (buf[RXBDLC_OFF] & RXBDLC_RTR)
+                       frame->can_id |= CAN_RTR_FLAG;
+       } else {
+               /* Standard ID format */
+               frame->can_id =
+                       (buf[RXBSIDH_OFF] << RXBSIDH_SHIFT) |
+                       (buf[RXBSIDL_OFF] >> RXBSIDL_SHIFT);
+               if (buf[RXBSIDL_OFF] & RXBSIDL_SRR)
+                       frame->can_id |= CAN_RTR_FLAG;
+       }
+       /* Data length */
+       frame->can_dlc = get_can_dlc(buf[RXBDLC_OFF] & RXBDLC_LEN_MASK);
+       memcpy(frame->data, buf + RXBDAT_OFF, frame->can_dlc);
+
+       priv->net->stats.rx_packets++;
+       priv->net->stats.rx_bytes += frame->can_dlc;
+
+       can_led_event(priv->net, CAN_LED_EVENT_RX);
+
+       netif_rx_ni(skb);
+}
+
+/* Request low-power sleep mode (CANCTRL REQOP = sleep). */
+static void mcp251x_hw_sleep(struct spi_device *spi)
+{
+       mcp251x_write_reg(spi, CANCTRL, CANCTRL_REQOP_SLEEP);
+}
+
+/* ndo_start_xmit: SPI I/O may sleep, so the frame is handed off to the
+ * tx workqueue. The queue is stopped here and woken again by the IRQ
+ * thread once the TX interrupt confirms completion, so at most one
+ * frame is in flight.
+ */
+static netdev_tx_t mcp251x_hard_start_xmit(struct sk_buff *skb,
+                                          struct net_device *net)
+{
+       struct mcp251x_priv *priv = netdev_priv(net);
+       struct spi_device *spi = priv->spi;
+
+       if (priv->tx_skb || priv->tx_len) {
+               dev_warn(&spi->dev, "hard_xmit called while tx busy\n");
+               return NETDEV_TX_BUSY;
+       }
+
+       if (can_dropped_invalid_skb(net, skb))
+               return NETDEV_TX_OK;
+
+       netif_stop_queue(net);
+       priv->tx_skb = skb;
+       queue_work(priv->wq, &priv->tx_work);
+
+       return NETDEV_TX_OK;
+}
+
+/* can_priv do_set_mode hook: only CAN_MODE_START (manual bus-off
+ * restart) is supported; the chip access itself is deferred to
+ * restart_work because SPI I/O may sleep.
+ */
+static int mcp251x_do_set_mode(struct net_device *net, enum can_mode mode)
+{
+       struct mcp251x_priv *priv = netdev_priv(net);
+
+       switch (mode) {
+       case CAN_MODE_START:
+               mcp251x_clean(net);
+               /* We have to delay work since SPI I/O may sleep */
+               priv->can.state = CAN_STATE_ERROR_ACTIVE;
+               priv->restart_tx = 1;
+               if (priv->can.restart_ms == 0)
+                       priv->after_suspend = AFTER_SUSPEND_RESTART;
+               queue_work(priv->wq, &priv->restart_work);
+               break;
+       default:
+               return -EOPNOTSUPP;
+       }
+
+       return 0;
+}
+
+/* Unmask the used interrupt sources and switch the chip into the
+ * operating mode selected by ctrlmode (loopback, listen-only or
+ * normal). For normal mode, CANSTAT is polled with a 1 s timeout until
+ * the mode change has taken effect. Returns 0 or -EBUSY on timeout.
+ */
+static int mcp251x_set_normal_mode(struct spi_device *spi)
+{
+       struct mcp251x_priv *priv = spi_get_drvdata(spi);
+       unsigned long timeout;
+
+       /* Enable interrupts */
+       mcp251x_write_reg(spi, CANINTE,
+                         CANINTE_ERRIE | CANINTE_TX2IE | CANINTE_TX1IE |
+                         CANINTE_TX0IE | CANINTE_RX1IE | CANINTE_RX0IE);
+
+       if (priv->can.ctrlmode & CAN_CTRLMODE_LOOPBACK) {
+               /* Put device into loopback mode */
+               mcp251x_write_reg(spi, CANCTRL, CANCTRL_REQOP_LOOPBACK);
+       } else if (priv->can.ctrlmode & CAN_CTRLMODE_LISTENONLY) {
+               /* Put device into listen-only mode */
+               mcp251x_write_reg(spi, CANCTRL, CANCTRL_REQOP_LISTEN_ONLY);
+       } else {
+               /* Put device into normal mode */
+               mcp251x_write_reg(spi, CANCTRL, CANCTRL_REQOP_NORMAL);
+
+               /* Wait for the device to enter normal mode */
+               timeout = jiffies + HZ;
+               while (mcp251x_read_reg(spi, CANSTAT) & CANCTRL_REQOP_MASK) {
+                       schedule();
+                       if (time_after(jiffies, timeout)) {
+                               /* keep user-visible strings on one line */
+                               dev_err(&spi->dev, "MCP251x didn't enter in normal mode\n");
+                               return -EBUSY;
+                       }
+               }
+       }
+       priv->can.state = CAN_STATE_ERROR_ACTIVE;
+       return 0;
+}
+
+/* Program CNF1-CNF3 from the bit timing computed by the CAN core. CNF3
+ * is updated with BIT MODIFY so that only the PHSEG2 field is touched
+ * and the SOF/WAKFIL bits are preserved.
+ */
+static int mcp251x_do_set_bittiming(struct net_device *net)
+{
+       struct mcp251x_priv *priv = netdev_priv(net);
+       struct can_bittiming *bt = &priv->can.bittiming;
+       struct spi_device *spi = priv->spi;
+
+       mcp251x_write_reg(spi, CNF1, ((bt->sjw - 1) << CNF1_SJW_SHIFT) |
+                         (bt->brp - 1));
+       mcp251x_write_reg(spi, CNF2, CNF2_BTLMODE |
+                         (priv->can.ctrlmode & CAN_CTRLMODE_3_SAMPLES ?
+                          CNF2_SAM : 0) |
+                         ((bt->phase_seg1 - 1) << CNF2_PS1_SHIFT) |
+                         (bt->prop_seg - 1));
+       mcp251x_write_bits(spi, CNF3, CNF3_PHSEG2_MASK,
+                          (bt->phase_seg2 - 1));
+       dev_dbg(&spi->dev, "CNF: 0x%02x 0x%02x 0x%02x\n",
+               mcp251x_read_reg(spi, CNF1),
+               mcp251x_read_reg(spi, CNF2),
+               mcp251x_read_reg(spi, CNF3));
+
+       return 0;
+}
+
+/* Post-reset chip setup: program the bit timing and configure both RX
+ * buffers (RXM1|RXM0), with roll-over from buffer 0 to buffer 1 (BUKT).
+ */
+static int mcp251x_setup(struct net_device *net, struct mcp251x_priv *priv,
+                        struct spi_device *spi)
+{
+       mcp251x_do_set_bittiming(net);
+
+       mcp251x_write_reg(spi, RXBCTRL(0),
+                         RXBCTRL_BUKT | RXBCTRL_RXM0 | RXBCTRL_RXM1);
+       mcp251x_write_reg(spi, RXBCTRL(1),
+                         RXBCTRL_RXM0 | RXBCTRL_RXM1);
+       return 0;
+}
+
+/* Reset the chip with the RESET instruction, observing the oscillator
+ * startup time before and after, and verify that it comes back up in
+ * configuration mode. Returns 0, the SPI transfer error, or -ENODEV.
+ */
+static int mcp251x_hw_reset(struct spi_device *spi)
+{
+       struct mcp251x_priv *priv = spi_get_drvdata(spi);
+       u8 reg;
+       int ret;
+
+       /* Wait for oscillator startup timer after power up */
+       mdelay(MCP251X_OST_DELAY_MS);
+
+       priv->spi_tx_buf[0] = INSTRUCTION_RESET;
+       ret = mcp251x_spi_trans(spi, 1);
+       if (ret)
+               return ret;
+
+       /* Wait for oscillator startup timer after reset */
+       mdelay(MCP251X_OST_DELAY_MS);
+
+       reg = mcp251x_read_reg(spi, CANSTAT);
+       if ((reg & CANCTRL_REQOP_MASK) != CANCTRL_REQOP_CONF)
+               return -ENODEV;
+
+       return 0;
+}
+
+/* Probe for chip presence: reset it and check that CANCTRL reads back
+ * the power-up default value in the tested bits ((ctrl & 0x17) == 0x07).
+ */
+static int mcp251x_hw_probe(struct spi_device *spi)
+{
+       u8 ctrl;
+       int ret;
+
+       ret = mcp251x_hw_reset(spi);
+       if (ret)
+               return ret;
+
+       ctrl = mcp251x_read_reg(spi, CANCTRL);
+
+       dev_dbg(&spi->dev, "CANCTRL 0x%02x\n", ctrl);
+
+       /* Check for power up default value */
+       if ((ctrl & 0x17) != 0x07)
+               return -ENODEV;
+
+       return 0;
+}
+
+/* Enable or disable an optional regulator; a missing regulator
+ * (NULL or ERR_PTR) is treated as success.
+ */
+static int mcp251x_power_enable(struct regulator *reg, int enable)
+{
+       if (IS_ERR_OR_NULL(reg))
+               return 0;
+
+       if (enable)
+               return regulator_enable(reg);
+       else
+               return regulator_disable(reg);
+}
+
+/* Error unwind for mcp251x_open() once the IRQ has been requested:
+ * free the IRQ, put the chip to sleep, cut transceiver power and close
+ * the CAN device.
+ */
+static void mcp251x_open_clean(struct net_device *net)
+{
+       struct mcp251x_priv *priv = netdev_priv(net);
+       struct spi_device *spi = priv->spi;
+
+       free_irq(spi->irq, priv);
+       mcp251x_hw_sleep(spi);
+       mcp251x_power_enable(priv->transceiver, 0);
+       close_candev(net);
+}
+
+/* ndo_stop: stop the IRQ thread, tear down the workqueue, disable and
+ * clear chip interrupts, abort any pending TX, put the chip to sleep
+ * and power down the transceiver.
+ */
+static int mcp251x_stop(struct net_device *net)
+{
+       struct mcp251x_priv *priv = netdev_priv(net);
+       struct spi_device *spi = priv->spi;
+
+       close_candev(net);
+
+       /* force_quit before free_irq so the threaded handler exits its loop */
+       priv->force_quit = 1;
+       free_irq(spi->irq, priv);
+       destroy_workqueue(priv->wq);
+       priv->wq = NULL;
+
+       mutex_lock(&priv->mcp_lock);
+
+       /* Disable and clear pending interrupts */
+       mcp251x_write_reg(spi, CANINTE, 0x00);
+       mcp251x_write_reg(spi, CANINTF, 0x00);
+
+       mcp251x_write_reg(spi, TXBCTRL(0), 0);
+       mcp251x_clean(net);
+
+       mcp251x_hw_sleep(spi);
+
+       mcp251x_power_enable(priv->transceiver, 0);
+
+       priv->can.state = CAN_STATE_STOPPED;
+
+       mutex_unlock(&priv->mcp_lock);
+
+       can_led_event(net, CAN_LED_EVENT_STOP);
+
+       return 0;
+}
+
+/* Queue an error frame with class @can_id and controller-status byte
+ * @data1 to the stack; best effort, only logs on allocation failure.
+ */
+static void mcp251x_error_skb(struct net_device *net, int can_id, int data1)
+{
+       struct sk_buff *skb;
+       struct can_frame *frame;
+
+       skb = alloc_can_err_skb(net, &frame);
+       if (skb) {
+               frame->can_id |= can_id;
+               frame->data[1] = data1;
+               netif_rx_ni(skb);
+       } else {
+               netdev_err(net, "cannot allocate error skb\n");
+       }
+}
+
+/* Workqueue handler for TX: drop the frame if the bus went off in the
+ * meantime; otherwise clamp the DLC, write the frame into TX buffer 0
+ * and keep an echo skb until the TX interrupt confirms completion.
+ */
+static void mcp251x_tx_work_handler(struct work_struct *ws)
+{
+       struct mcp251x_priv *priv = container_of(ws, struct mcp251x_priv,
+                                                tx_work);
+       struct spi_device *spi = priv->spi;
+       struct net_device *net = priv->net;
+       struct can_frame *frame;
+
+       mutex_lock(&priv->mcp_lock);
+       if (priv->tx_skb) {
+               if (priv->can.state == CAN_STATE_BUS_OFF) {
+                       mcp251x_clean(net);
+               } else {
+                       frame = (struct can_frame *)priv->tx_skb->data;
+
+                       if (frame->can_dlc > CAN_FRAME_MAX_DATA_LEN)
+                               frame->can_dlc = CAN_FRAME_MAX_DATA_LEN;
+                       mcp251x_hw_tx(spi, frame, 0);
+                       priv->tx_len = 1 + frame->can_dlc;
+                       can_put_echo_skb(priv->tx_skb, net, 0);
+                       priv->tx_skb = NULL;
+               }
+       }
+       mutex_unlock(&priv->mcp_lock);
+}
+
+/* Workqueue handler for restarts: after a suspend, re-initialize the
+ * chip and return it to its pre-suspend up/down state (or back to
+ * sleep); for a bus-off restart, clean the TX path, wake the queue and
+ * report CAN_ERR_RESTARTED to the stack.
+ */
+static void mcp251x_restart_work_handler(struct work_struct *ws)
+{
+       struct mcp251x_priv *priv = container_of(ws, struct mcp251x_priv,
+                                                restart_work);
+       struct spi_device *spi = priv->spi;
+       struct net_device *net = priv->net;
+
+       mutex_lock(&priv->mcp_lock);
+       if (priv->after_suspend) {
+               mcp251x_hw_reset(spi);
+               mcp251x_setup(net, priv, spi);
+               if (priv->after_suspend & AFTER_SUSPEND_RESTART) {
+                       mcp251x_set_normal_mode(spi);
+               } else if (priv->after_suspend & AFTER_SUSPEND_UP) {
+                       netif_device_attach(net);
+                       mcp251x_clean(net);
+                       mcp251x_set_normal_mode(spi);
+                       netif_wake_queue(net);
+               } else {
+                       mcp251x_hw_sleep(spi);
+               }
+               priv->after_suspend = 0;
+               priv->force_quit = 0;
+       }
+
+       if (priv->restart_tx) {
+               priv->restart_tx = 0;
+               mcp251x_write_reg(spi, TXBCTRL(0), 0);
+               mcp251x_clean(net);
+               netif_wake_queue(net);
+               mcp251x_error_skb(net, CAN_ERR_RESTARTED, 0);
+       }
+       mutex_unlock(&priv->mcp_lock);
+}
+
+/* Threaded interrupt handler. Loops until all sources in CANINTF are
+ * serviced: receives frames, clears TX/error flags, derives the new CAN
+ * state from EFLG, generates error frames and completes transmissions.
+ * Exits early when force_quit is set or on bus-off with automatic
+ * restart disabled.
+ */
+static irqreturn_t mcp251x_can_ist(int irq, void *dev_id)
+{
+       struct mcp251x_priv *priv = dev_id;
+       struct spi_device *spi = priv->spi;
+       struct net_device *net = priv->net;
+
+       mutex_lock(&priv->mcp_lock);
+       while (!priv->force_quit) {
+               enum can_state new_state;
+               u8 intf, eflag;
+               u8 clear_intf = 0;
+               int can_id = 0, data1 = 0;
+
+               mcp251x_read_2regs(spi, CANINTF, &intf, &eflag);
+
+               /* mask out flags we don't care about */
+               intf &= CANINTF_RX | CANINTF_TX | CANINTF_ERR;
+
+               /* receive buffer 0 */
+               if (intf & CANINTF_RX0IF) {
+                       mcp251x_hw_rx(spi, 0);
+                       /*
+                        * Free one buffer ASAP
+                        * (The MCP2515 does this automatically.)
+                        */
+                       if (mcp251x_is_2510(spi))
+                               mcp251x_write_bits(spi, CANINTF, CANINTF_RX0IF, 0x00);
+               }
+
+               /* receive buffer 1 */
+               if (intf & CANINTF_RX1IF) {
+                       mcp251x_hw_rx(spi, 1);
+                       /* the MCP2515 does this automatically */
+                       if (mcp251x_is_2510(spi))
+                               clear_intf |= CANINTF_RX1IF;
+               }
+
+               /* any error or tx interrupt we need to clear? */
+               if (intf & (CANINTF_ERR | CANINTF_TX))
+                       clear_intf |= intf & (CANINTF_ERR | CANINTF_TX);
+               if (clear_intf)
+                       mcp251x_write_bits(spi, CANINTF, clear_intf, 0x00);
+
+               if (eflag)
+                       mcp251x_write_bits(spi, EFLG, eflag, 0x00);
+
+               /* Update can state */
+               if (eflag & EFLG_TXBO) {
+                       new_state = CAN_STATE_BUS_OFF;
+                       can_id |= CAN_ERR_BUSOFF;
+               } else if (eflag & EFLG_TXEP) {
+                       new_state = CAN_STATE_ERROR_PASSIVE;
+                       can_id |= CAN_ERR_CRTL;
+                       data1 |= CAN_ERR_CRTL_TX_PASSIVE;
+               } else if (eflag & EFLG_RXEP) {
+                       new_state = CAN_STATE_ERROR_PASSIVE;
+                       can_id |= CAN_ERR_CRTL;
+                       data1 |= CAN_ERR_CRTL_RX_PASSIVE;
+               } else if (eflag & EFLG_TXWAR) {
+                       new_state = CAN_STATE_ERROR_WARNING;
+                       can_id |= CAN_ERR_CRTL;
+                       data1 |= CAN_ERR_CRTL_TX_WARNING;
+               } else if (eflag & EFLG_RXWAR) {
+                       new_state = CAN_STATE_ERROR_WARNING;
+                       can_id |= CAN_ERR_CRTL;
+                       data1 |= CAN_ERR_CRTL_RX_WARNING;
+               } else {
+                       new_state = CAN_STATE_ERROR_ACTIVE;
+               }
+
+               /* Update can state statistics (the fall-through from the
+                * ERROR_ACTIVE case is intentional: a jump straight to
+                * passive/bus-off counts a warning transition too)
+                */
+               switch (priv->can.state) {
+               case CAN_STATE_ERROR_ACTIVE:
+                       if (new_state >= CAN_STATE_ERROR_WARNING &&
+                           new_state <= CAN_STATE_BUS_OFF)
+                               priv->can.can_stats.error_warning++;
+               case CAN_STATE_ERROR_WARNING:   /* fallthrough */
+                       if (new_state >= CAN_STATE_ERROR_PASSIVE &&
+                           new_state <= CAN_STATE_BUS_OFF)
+                               priv->can.can_stats.error_passive++;
+                       break;
+               default:
+                       break;
+               }
+               priv->can.state = new_state;
+
+               if (intf & CANINTF_ERRIF) {
+                       /* Handle overflow counters */
+                       if (eflag & (EFLG_RX0OVR | EFLG_RX1OVR)) {
+                               if (eflag & EFLG_RX0OVR) {
+                                       net->stats.rx_over_errors++;
+                                       net->stats.rx_errors++;
+                               }
+                               if (eflag & EFLG_RX1OVR) {
+                                       net->stats.rx_over_errors++;
+                                       net->stats.rx_errors++;
+                               }
+                               can_id |= CAN_ERR_CRTL;
+                               data1 |= CAN_ERR_CRTL_RX_OVERFLOW;
+                       }
+                       mcp251x_error_skb(net, can_id, data1);
+               }
+
+               /* bus-off without automatic restart: stop servicing */
+               if (priv->can.state == CAN_STATE_BUS_OFF) {
+                       if (priv->can.restart_ms == 0) {
+                               priv->force_quit = 1;
+                               can_bus_off(net);
+                               mcp251x_hw_sleep(spi);
+                               break;
+                       }
+               }
+
+               /* nothing was pending: done */
+               if (intf == 0)
+                       break;
+
+               if (intf & CANINTF_TX) {
+                       net->stats.tx_packets++;
+                       net->stats.tx_bytes += priv->tx_len - 1;
+                       can_led_event(net, CAN_LED_EVENT_TX);
+                       if (priv->tx_len) {
+                               can_get_echo_skb(net, 0);
+                               priv->tx_len = 0;
+                       }
+                       netif_wake_queue(net);
+               }
+
+       }
+       mutex_unlock(&priv->mcp_lock);
+       return IRQ_HANDLED;
+}
+
+/* ndo_open: bring the interface up. Powers the transceiver, requests
+ * the threaded IRQ, creates the freezable workqueue, then resets and
+ * configures the chip; everything after open_candev() runs under
+ * mcp_lock. On any failure the already-acquired resources are unwound.
+ */
+static int mcp251x_open(struct net_device *net)
+{
+       struct mcp251x_priv *priv = netdev_priv(net);
+       struct spi_device *spi = priv->spi;
+       unsigned long flags = IRQF_ONESHOT | IRQF_TRIGGER_FALLING;
+       int ret;
+
+       ret = open_candev(net);
+       if (ret) {
+               dev_err(&spi->dev, "unable to set initial baudrate!\n");
+               return ret;
+       }
+
+       mutex_lock(&priv->mcp_lock);
+       mcp251x_power_enable(priv->transceiver, 1);
+
+       priv->force_quit = 0;
+       priv->tx_skb = NULL;
+       priv->tx_len = 0;
+
+       /* flags already contains IRQF_ONESHOT; don't OR it in again */
+       ret = request_threaded_irq(spi->irq, NULL, mcp251x_can_ist,
+                                  flags, DEVICE_NAME, priv);
+       if (ret) {
+               dev_err(&spi->dev, "failed to acquire irq %d\n", spi->irq);
+               mcp251x_power_enable(priv->transceiver, 0);
+               close_candev(net);
+               goto open_unlock;
+       }
+
+       priv->wq = create_freezable_workqueue("mcp251x_wq");
+       if (!priv->wq) {
+               ret = -ENOMEM;
+               mcp251x_open_clean(net);
+               goto open_unlock;
+       }
+       INIT_WORK(&priv->tx_work, mcp251x_tx_work_handler);
+       INIT_WORK(&priv->restart_work, mcp251x_restart_work_handler);
+
+       ret = mcp251x_hw_reset(spi);
+       if (ret) {
+               mcp251x_open_clean(net);
+               goto open_unlock;
+       }
+       ret = mcp251x_setup(net, priv, spi);
+       if (ret) {
+               mcp251x_open_clean(net);
+               goto open_unlock;
+       }
+       ret = mcp251x_set_normal_mode(spi);
+       if (ret) {
+               mcp251x_open_clean(net);
+               goto open_unlock;
+       }
+
+       can_led_event(net, CAN_LED_EVENT_OPEN);
+
+       netif_wake_queue(net);
+
+open_unlock:
+       mutex_unlock(&priv->mcp_lock);
+       return ret;
+}
+
+/* net_device callbacks wired into the CAN network device at probe time */
+static const struct net_device_ops mcp251x_netdev_ops = {
+       .ndo_open = mcp251x_open,
+       .ndo_stop = mcp251x_stop,
+       .ndo_start_xmit = mcp251x_hard_start_xmit,
+       .ndo_change_mtu = can_change_mtu,
+};
+
+/* Device-tree match table; .data carries the chip model enum */
+static const struct of_device_id mcp251x_of_match[] = {
+       {
+               .compatible     = "microchip,mcp2510",
+               .data           = (void *)CAN_MCP251X_MCP2510,
+       },
+       {
+               .compatible     = "microchip,mcp2515",
+               .data           = (void *)CAN_MCP251X_MCP2515,
+       },
+       { }
+};
+MODULE_DEVICE_TABLE(of, mcp251x_of_match);
+
+/* Legacy (non-DT) SPI id table; .driver_data carries the chip model */
+static const struct spi_device_id mcp251x_id_table[] = {
+       {
+               .name           = "mcp2510",
+               .driver_data    = (kernel_ulong_t)CAN_MCP251X_MCP2510,
+       },
+       {
+               .name           = "mcp2515",
+               .driver_data    = (kernel_ulong_t)CAN_MCP251X_MCP2515,
+       },
+       { }
+};
+MODULE_DEVICE_TABLE(spi, mcp251x_id_table);
+
+/* Probe: determine the oscillator frequency (clock node or platform data),
+ * allocate and configure the candev, set up the SPI link and regulators,
+ * allocate DMA or kmalloc transfer buffers, verify the chip is present and
+ * register the CAN network device.  The chip is left in sleep mode until
+ * the interface is opened.
+ */
+static int mcp251x_can_probe(struct spi_device *spi)
+{
+       const struct of_device_id *of_id = of_match_device(mcp251x_of_match,
+                                                          &spi->dev);
+       struct mcp251x_platform_data *pdata = dev_get_platdata(&spi->dev);
+       struct net_device *net;
+       struct mcp251x_priv *priv;
+       struct clk *clk;
+       int freq, ret;
+
+       /* Prefer a clock provider; fall back to pdata oscillator frequency */
+       clk = devm_clk_get(&spi->dev, NULL);
+       if (IS_ERR(clk)) {
+               if (pdata)
+                       freq = pdata->oscillator_frequency;
+               else
+                       return PTR_ERR(clk);
+       } else {
+               freq = clk_get_rate(clk);
+       }
+
+       /* Sanity check */
+       if (freq < 1000000 || freq > 25000000)
+               return -ERANGE;
+
+       /* Allocate can/net device */
+       net = alloc_candev(sizeof(struct mcp251x_priv), TX_ECHO_SKB_MAX);
+       if (!net)
+               return -ENOMEM;
+
+       if (!IS_ERR(clk)) {
+               ret = clk_prepare_enable(clk);
+               if (ret)
+                       goto out_free;
+       }
+
+       net->netdev_ops = &mcp251x_netdev_ops;
+       net->flags |= IFF_ECHO;
+
+       priv = netdev_priv(net);
+       priv->can.bittiming_const = &mcp251x_bittiming_const;
+       priv->can.do_set_mode = mcp251x_do_set_mode;
+       /* CAN core clock is half the oscillator — presumably per datasheet;
+        * TODO(review) confirm against the MCP251x documentation.
+        */
+       priv->can.clock.freq = freq / 2;
+       priv->can.ctrlmode_supported = CAN_CTRLMODE_3_SAMPLES |
+               CAN_CTRLMODE_LOOPBACK | CAN_CTRLMODE_LISTENONLY;
+       if (of_id)
+               priv->model = (enum mcp251x_model)of_id->data;
+       else
+               priv->model = spi_get_device_id(spi)->driver_data;
+       priv->net = net;
+       priv->clk = clk;
+
+       spi_set_drvdata(spi, priv);
+
+       /* Configure the SPI bus */
+       spi->bits_per_word = 8;
+       /* MCP2510 is limited to 5 MHz, later chips handle 10 MHz */
+       if (mcp251x_is_2510(spi))
+               spi->max_speed_hz = spi->max_speed_hz ? : 5 * 1000 * 1000;
+       else
+               spi->max_speed_hz = spi->max_speed_hz ? : 10 * 1000 * 1000;
+       ret = spi_setup(spi);
+       if (ret)
+               goto out_clk;
+
+       /* Regulators are optional; only a deferral aborts the probe */
+       priv->power = devm_regulator_get(&spi->dev, "vdd");
+       priv->transceiver = devm_regulator_get(&spi->dev, "xceiver");
+       if ((PTR_ERR(priv->power) == -EPROBE_DEFER) ||
+           (PTR_ERR(priv->transceiver) == -EPROBE_DEFER)) {
+               ret = -EPROBE_DEFER;
+               goto out_clk;
+       }
+
+       ret = mcp251x_power_enable(priv->power, 1);
+       if (ret)
+               goto out_clk;
+
+       priv->spi = spi;
+       mutex_init(&priv->mcp_lock);
+
+       /* If requested, allocate DMA buffers */
+       if (mcp251x_enable_dma) {
+               spi->dev.coherent_dma_mask = ~0;
+
+               /*
+                * Minimum coherent DMA allocation is PAGE_SIZE, so allocate
+                * that much and share it between Tx and Rx DMA buffers.
+                */
+               priv->spi_tx_buf = dma_alloc_coherent(&spi->dev,
+                                                     PAGE_SIZE,
+                                                     &priv->spi_tx_dma,
+                                                     GFP_DMA);
+
+               if (priv->spi_tx_buf) {
+                       priv->spi_rx_buf = (priv->spi_tx_buf + (PAGE_SIZE / 2));
+                       priv->spi_rx_dma = (dma_addr_t)(priv->spi_tx_dma +
+                                                       (PAGE_SIZE / 2));
+               } else {
+                       /* Fall back to non-DMA */
+                       mcp251x_enable_dma = 0;
+               }
+       }
+
+       /* Allocate non-DMA buffers */
+       if (!mcp251x_enable_dma) {
+               priv->spi_tx_buf = devm_kzalloc(&spi->dev, SPI_TRANSFER_BUF_LEN,
+                                               GFP_KERNEL);
+               if (!priv->spi_tx_buf) {
+                       ret = -ENOMEM;
+                       goto error_probe;
+               }
+               priv->spi_rx_buf = devm_kzalloc(&spi->dev, SPI_TRANSFER_BUF_LEN,
+                                               GFP_KERNEL);
+               if (!priv->spi_rx_buf) {
+                       ret = -ENOMEM;
+                       goto error_probe;
+               }
+       }
+
+       SET_NETDEV_DEV(net, &spi->dev);
+
+       /* Here is OK to not lock the MCP, no one knows about it yet */
+       ret = mcp251x_hw_probe(spi);
+       if (ret)
+               goto error_probe;
+
+       mcp251x_hw_sleep(spi);
+
+       ret = register_candev(net);
+       if (ret)
+               goto error_probe;
+
+       devm_can_led_init(net);
+
+       return 0;
+
+error_probe:
+       if (mcp251x_enable_dma)
+               dma_free_coherent(&spi->dev, PAGE_SIZE,
+                                 priv->spi_tx_buf, priv->spi_tx_dma);
+       mcp251x_power_enable(priv->power, 0);
+
+out_clk:
+       if (!IS_ERR(clk))
+               clk_disable_unprepare(clk);
+
+out_free:
+       free_candev(net);
+
+       return ret;
+}
+
+/* Tear down in reverse probe order: unregister the netdev, free the shared
+ * DMA buffer (if used), power down and stop the clock.
+ */
+static int mcp251x_can_remove(struct spi_device *spi)
+{
+       struct mcp251x_priv *priv = spi_get_drvdata(spi);
+       struct net_device *net = priv->net;
+
+       unregister_candev(net);
+
+       if (mcp251x_enable_dma) {
+               dma_free_coherent(&spi->dev, PAGE_SIZE,
+                                 priv->spi_tx_buf, priv->spi_tx_dma);
+       }
+
+       mcp251x_power_enable(priv->power, 0);
+
+       if (!IS_ERR(priv->clk))
+               clk_disable_unprepare(priv->clk);
+
+       free_candev(net);
+
+       return 0;
+}
+
+/* System suspend: quiesce the IRQ, put a running interface to sleep and
+ * drop transceiver/regulator power.  What was disabled is recorded in
+ * priv->after_suspend so mcp251x_can_resume() can undo exactly that.
+ */
+static int __maybe_unused mcp251x_can_suspend(struct device *dev)
+{
+       struct spi_device *spi = to_spi_device(dev);
+       struct mcp251x_priv *priv = spi_get_drvdata(spi);
+       struct net_device *net = priv->net;
+
+       priv->force_quit = 1;
+       disable_irq(spi->irq);
+       /*
+        * Note: at this point neither IST nor workqueues are running.
+        * open/stop cannot be called anyway so locking is not needed
+        */
+       if (netif_running(net)) {
+               netif_device_detach(net);
+
+               mcp251x_hw_sleep(spi);
+               mcp251x_power_enable(priv->transceiver, 0);
+               priv->after_suspend = AFTER_SUSPEND_UP;
+       } else {
+               priv->after_suspend = AFTER_SUSPEND_DOWN;
+       }
+
+       if (!IS_ERR_OR_NULL(priv->power)) {
+               regulator_disable(priv->power);
+               priv->after_suspend |= AFTER_SUSPEND_POWER;
+       }
+
+       return 0;
+}
+
+/* System resume: re-enable whatever suspend disabled (tracked in
+ * priv->after_suspend) and let restart_work bring the chip back up.
+ */
+static int __maybe_unused mcp251x_can_resume(struct device *dev)
+{
+       struct spi_device *spi = to_spi_device(dev);
+       struct mcp251x_priv *priv = spi_get_drvdata(spi);
+
+       if (priv->after_suspend & AFTER_SUSPEND_POWER) {
+               mcp251x_power_enable(priv->power, 1);
+               queue_work(priv->wq, &priv->restart_work);
+       } else {
+               if (priv->after_suspend & AFTER_SUSPEND_UP) {
+                       mcp251x_power_enable(priv->transceiver, 1);
+                       queue_work(priv->wq, &priv->restart_work);
+               } else {
+                       priv->after_suspend = 0;
+               }
+       }
+       priv->force_quit = 0;
+       enable_irq(spi->irq);
+       return 0;
+}
+
+static SIMPLE_DEV_PM_OPS(mcp251x_can_pm_ops, mcp251x_can_suspend,
+       mcp251x_can_resume);
+
+/* SPI driver glue: matches via DT table or legacy SPI id table */
+static struct spi_driver mcp251x_can_driver = {
+       .driver = {
+               .name = DEVICE_NAME,
+               .owner = THIS_MODULE,
+               .of_match_table = mcp251x_of_match,
+               .pm = &mcp251x_can_pm_ops,
+       },
+       .id_table = mcp251x_id_table,
+       .probe = mcp251x_can_probe,
+       .remove = mcp251x_can_remove,
+};
+module_spi_driver(mcp251x_can_driver);
+
+MODULE_AUTHOR("Chris Elston <celston@katalix.com>, "
+             "Christian Pellegrin <chripell@evolware.org>");
+MODULE_DESCRIPTION("Microchip 251x CAN driver");
+MODULE_LICENSE("GPL v2");
index fc96a3d83ebecde338cdc5fd1d7b12f5ee9fbc25..a77db919363c08baa2c76dbb35c60701ea2c68ff 100644 (file)
@@ -13,13 +13,21 @@ config CAN_ESD_USB2
           This driver supports the CAN-USB/2 interface
           from esd electronic system design gmbh (http://www.esd.eu).
 
+config CAN_GS_USB
+       tristate "Geschwister Schneider UG interfaces"
+       ---help---
+         This driver supports the Geschwister Schneider USB/CAN devices.
+         If unsure, choose N. Choose Y for built-in support, or
+         M to compile as a module (the module will be named gs_usb).
+
 config CAN_KVASER_USB
        tristate "Kvaser CAN/USB interface"
        ---help---
          This driver adds support for Kvaser CAN/USB devices like Kvaser
          Leaf Light.
 
-         The driver gives support for the following devices:
+         The driver provides support for the following devices:
            - Kvaser Leaf Light
            - Kvaser Leaf Professional HS
            - Kvaser Leaf SemiPro HS
@@ -36,6 +44,8 @@ config CAN_KVASER_USB
            - Kvaser Leaf Light "China"
            - Kvaser BlackBird SemiPro
            - Kvaser USBcan R
+           - Kvaser Leaf Light v2
+           - Kvaser Mini PCI Express HS
 
          If unsure, say N.
 
index becef460a91aeb28851e91752b67ddec84ef931c..7b9a393b1ac82a1caf4684a704d7121ad16dc3c9 100644 (file)
@@ -4,6 +4,7 @@
 
 obj-$(CONFIG_CAN_EMS_USB) += ems_usb.o
 obj-$(CONFIG_CAN_ESD_USB2) += esd_usb2.o
+obj-$(CONFIG_CAN_GS_USB) += gs_usb.o
 obj-$(CONFIG_CAN_KVASER_USB) += kvaser_usb.o
 obj-$(CONFIG_CAN_PEAK_USB) += peak_usb/
 obj-$(CONFIG_CAN_8DEV_USB) += usb_8dev.o
diff --git a/drivers/net/can/usb/gs_usb.c b/drivers/net/can/usb/gs_usb.c
new file mode 100644 (file)
index 0000000..04b0f84
--- /dev/null
@@ -0,0 +1,971 @@
+/* CAN driver for Geschwister Schneider USB/CAN devices.
+ *
+ * Copyright (C) 2013 Geschwister Schneider Technologie-,
+ * Entwicklungs- und Vertriebs UG (Haftungsbeschränkt).
+ *
+ * Many thanks to all socketcan devs!
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published
+ * by the Free Software Foundation; version 2 of the License.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ */
+
+#include <linux/init.h>
+#include <linux/signal.h>
+#include <linux/module.h>
+#include <linux/netdevice.h>
+#include <linux/usb.h>
+
+#include <linux/can.h>
+#include <linux/can/dev.h>
+#include <linux/can/error.h>
+
+/* Device specific constants */
+#define USB_GSUSB_1_VENDOR_ID      0x1d50
+#define USB_GSUSB_1_PRODUCT_ID     0x606f
+
+#define GSUSB_ENDPOINT_IN          1
+#define GSUSB_ENDPOINT_OUT         2
+
+/* bRequest values for vendor control transfers to the device */
+enum gs_usb_breq {
+       GS_USB_BREQ_HOST_FORMAT = 0,
+       GS_USB_BREQ_BITTIMING,
+       GS_USB_BREQ_MODE,
+       GS_USB_BREQ_BERR,
+       GS_USB_BREQ_BT_CONST,
+       GS_USB_BREQ_DEVICE_CONFIG
+};
+
+enum gs_can_mode {
+       /* reset a channel. turns it off */
+       GS_CAN_MODE_RESET = 0,
+       /* starts a channel */
+       GS_CAN_MODE_START
+};
+
+/* Controller state as reported by the device */
+enum gs_can_state {
+       GS_CAN_STATE_ERROR_ACTIVE = 0,
+       GS_CAN_STATE_ERROR_WARNING,
+       GS_CAN_STATE_ERROR_PASSIVE,
+       GS_CAN_STATE_BUS_OFF,
+       GS_CAN_STATE_STOPPED,
+       GS_CAN_STATE_SLEEPING
+};
+
+/* data types passed between host and device */
+struct gs_host_config {
+       u32 byte_order;
+} __packed;
+/* All data exchanged between host and device is exchanged in host byte order,
+ * thanks to the struct gs_host_config byte_order member, which is sent first
+ * to indicate the desired byte order.
+ */
+
+/* Reply to GS_USB_BREQ_DEVICE_CONFIG */
+struct gs_device_config {
+       u8 reserved1;
+       u8 reserved2;
+       u8 reserved3;
+       u8 icount;
+       u32 sw_version;
+       u32 hw_version;
+} __packed;
+
+#define GS_CAN_MODE_NORMAL               0
+#define GS_CAN_MODE_LISTEN_ONLY          (1<<0)
+#define GS_CAN_MODE_LOOP_BACK            (1<<1)
+#define GS_CAN_MODE_TRIPLE_SAMPLE        (1<<2)
+#define GS_CAN_MODE_ONE_SHOT             (1<<3)
+
+/* Payload of GS_USB_BREQ_MODE: start/reset a channel with mode flags */
+struct gs_device_mode {
+       u32 mode;
+       u32 flags;
+} __packed;
+
+struct gs_device_state {
+       u32 state;
+       u32 rxerr;
+       u32 txerr;
+} __packed;
+
+/* Payload of GS_USB_BREQ_BITTIMING */
+struct gs_device_bittiming {
+       u32 prop_seg;
+       u32 phase_seg1;
+       u32 phase_seg2;
+       u32 sjw;
+       u32 brp;
+} __packed;
+
+#define GS_CAN_FEATURE_LISTEN_ONLY      (1<<0)
+#define GS_CAN_FEATURE_LOOP_BACK        (1<<1)
+#define GS_CAN_FEATURE_TRIPLE_SAMPLE    (1<<2)
+#define GS_CAN_FEATURE_ONE_SHOT         (1<<3)
+
+/* Reply to GS_USB_BREQ_BT_CONST: feature bits and bit-timing limits */
+struct gs_device_bt_const {
+       u32 feature;
+       u32 fclk_can;
+       u32 tseg1_min;
+       u32 tseg1_max;
+       u32 tseg2_min;
+       u32 tseg2_max;
+       u32 sjw_max;
+       u32 brp_min;
+       u32 brp_max;
+       u32 brp_inc;
+} __packed;
+
+#define GS_CAN_FLAG_OVERFLOW 1
+
+/* On-the-wire CAN frame; echo_id of -1 (all ones) marks a normal RX frame */
+struct gs_host_frame {
+       u32 echo_id;
+       u32 can_id;
+
+       u8 can_dlc;
+       u8 channel;
+       u8 flags;
+       u8 reserved;
+
+       u8 data[8];
+} __packed;
+/* The GS USB devices make use of the same flags and masks as in
+ * linux/can.h and linux/can/error.h, and no additional mapping is necessary.
+ */
+
+/* Only send a max of GS_MAX_TX_URBS frames per channel at a time. */
+#define GS_MAX_TX_URBS 10
+/* Only launch a max of GS_MAX_RX_URBS usb requests at a time. */
+#define GS_MAX_RX_URBS 30
+/* Maximum number of interfaces the driver supports per device.
+ * Current hardware only supports 2 interfaces. The future may vary.
+ */
+#define GS_MAX_INTF 2
+
+/* Bookkeeping for one in-flight TX frame; echo_id doubles as the slot id */
+struct gs_tx_context {
+       struct gs_can *dev;
+       unsigned int echo_id;
+};
+
+/* Per-channel state (one CAN network interface) */
+struct gs_can {
+       struct can_priv can; /* must be the first member */
+
+       struct gs_usb *parent;
+
+       struct net_device *netdev;
+       struct usb_device *udev;
+       struct usb_interface *iface;
+
+       struct can_bittiming_const bt_const;
+       unsigned int channel;   /* channel number */
+
+       /* This lock prevents a race condition between xmit and receive. */
+       spinlock_t tx_ctx_lock;
+       struct gs_tx_context tx_context[GS_MAX_TX_URBS];
+
+       struct usb_anchor tx_submitted;
+       atomic_t active_tx_urbs;
+};
+
+/* usb interface struct */
+struct gs_usb {
+       struct gs_can *canch[GS_MAX_INTF];
+       struct usb_anchor rx_submitted;
+       atomic_t active_channels;
+       struct usb_device *udev;
+};
+
+/* 'allocate' a tx context.
+ * returns a valid tx context or NULL if there is no space.
+ * A slot is free when its echo_id equals GS_MAX_TX_URBS (the sentinel set
+ * by gs_free_tx_context()); claiming it stores the slot index instead.
+ */
+static struct gs_tx_context *gs_alloc_tx_context(struct gs_can *dev)
+{
+       int i = 0;
+       unsigned long flags;
+
+       spin_lock_irqsave(&dev->tx_ctx_lock, flags);
+
+       for (; i < GS_MAX_TX_URBS; i++) {
+               if (dev->tx_context[i].echo_id == GS_MAX_TX_URBS) {
+                       dev->tx_context[i].echo_id = i;
+                       spin_unlock_irqrestore(&dev->tx_ctx_lock, flags);
+                       return &dev->tx_context[i];
+               }
+       }
+
+       spin_unlock_irqrestore(&dev->tx_ctx_lock, flags);
+       return NULL;
+}
+
+/* releases a tx context
+ * (marks the slot free by restoring the GS_MAX_TX_URBS sentinel)
+ */
+static void gs_free_tx_context(struct gs_tx_context *txc)
+{
+       txc->echo_id = GS_MAX_TX_URBS;
+}
+
+/* Get a tx context by id.
+ * Returns the context only if the slot is currently claimed with that id
+ * (echo_id == id); otherwise NULL, which callers treat as a bogus echo id.
+ */
+static struct gs_tx_context *gs_get_tx_context(struct gs_can *dev, unsigned int id)
+{
+       unsigned long flags;
+
+       if (id < GS_MAX_TX_URBS) {
+               spin_lock_irqsave(&dev->tx_ctx_lock, flags);
+               if (dev->tx_context[id].echo_id == id) {
+                       spin_unlock_irqrestore(&dev->tx_ctx_lock, flags);
+                       return &dev->tx_context[id];
+               }
+               spin_unlock_irqrestore(&dev->tx_ctx_lock, flags);
+       }
+       return NULL;
+}
+
+/* Put one channel into reset (turning it off) via a vendor control request.
+ * Returns the usb_control_msg() result: bytes transferred on success or a
+ * negative error code.
+ */
+static int gs_cmd_reset(struct gs_usb *gsusb, struct gs_can *gsdev)
+{
+       struct gs_device_mode *dm;
+       struct usb_interface *intf = gsdev->iface;
+       int rc;
+
+       /* USB transfer buffers must be heap allocated, not on the stack */
+       dm = kzalloc(sizeof(*dm), GFP_KERNEL);
+       if (!dm)
+               return -ENOMEM;
+
+       dm->mode = GS_CAN_MODE_RESET;
+
+       rc = usb_control_msg(interface_to_usbdev(intf),
+                            usb_sndctrlpipe(interface_to_usbdev(intf), 0),
+                            GS_USB_BREQ_MODE,
+                            USB_DIR_OUT|USB_TYPE_VENDOR|USB_RECIP_INTERFACE,
+                            gsdev->channel,
+                            0,
+                            dm,
+                            sizeof(*dm),
+                            1000);
+
+       /* fix: dm was previously leaked on every call */
+       kfree(dm);
+
+       return rc;
+}
+
+/* Map an incoming CAN error frame onto can.state and the can_stats
+ * counters (restarts, bus-off, warning/passive transitions).
+ */
+static void gs_update_state(struct gs_can *dev, struct can_frame *cf)
+{
+       struct can_device_stats *can_stats = &dev->can.can_stats;
+
+       if (cf->can_id & CAN_ERR_RESTARTED) {
+               dev->can.state = CAN_STATE_ERROR_ACTIVE;
+               can_stats->restarts++;
+       } else if (cf->can_id & CAN_ERR_BUSOFF) {
+               dev->can.state = CAN_STATE_BUS_OFF;
+               can_stats->bus_off++;
+       } else if (cf->can_id & CAN_ERR_CRTL) {
+               if ((cf->data[1] & CAN_ERR_CRTL_TX_WARNING) ||
+                   (cf->data[1] & CAN_ERR_CRTL_RX_WARNING)) {
+                       dev->can.state = CAN_STATE_ERROR_WARNING;
+                       can_stats->error_warning++;
+               } else if ((cf->data[1] & CAN_ERR_CRTL_TX_PASSIVE) ||
+                          (cf->data[1] & CAN_ERR_CRTL_RX_PASSIVE)) {
+                       dev->can.state = CAN_STATE_ERROR_PASSIVE;
+                       can_stats->error_passive++;
+               } else {
+                       dev->can.state = CAN_STATE_ERROR_ACTIVE;
+               }
+       }
+}
+
+/* Bulk-IN completion handler.  Dispatches a received gs_host_frame either
+ * as a normal RX frame (echo_id == -1) or as a TX echo completion, reports
+ * RX-overflow via an error skb, then resubmits the URB.  Runs in interrupt
+ * context.
+ */
+static void gs_usb_recieve_bulk_callback(struct urb *urb)
+{
+       struct gs_usb *usbcan = urb->context;
+       struct gs_can *dev;
+       struct net_device *netdev;
+       int rc;
+       struct net_device_stats *stats;
+       struct gs_host_frame *hf = urb->transfer_buffer;
+       struct gs_tx_context *txc;
+       struct can_frame *cf;
+       struct sk_buff *skb;
+
+       BUG_ON(!usbcan);
+
+       switch (urb->status) {
+       case 0: /* success */
+               break;
+       case -ENOENT:
+       case -ESHUTDOWN:
+               return;
+       default:
+               /* do not resubmit aborted urbs. eg: when device goes down */
+               return;
+       }
+
+       /* device reports out of range channel id */
+       if (hf->channel >= GS_MAX_INTF)
+               goto resubmit_urb;
+
+       /* NOTE(review): canch[hf->channel] may still be NULL while channels
+        * are being brought up/torn down — confirm a NULL check is not
+        * needed here.
+        */
+       dev = usbcan->canch[hf->channel];
+
+       netdev = dev->netdev;
+       stats = &netdev->stats;
+
+       if (!netif_device_present(netdev))
+               return;
+
+       /* NOTE(review): echo_id is u32; "== -1" relies on the implicit
+        * conversion of -1 to 0xffffffff.
+        */
+       if (hf->echo_id == -1) { /* normal rx */
+               skb = alloc_can_skb(dev->netdev, &cf);
+               if (!skb)
+                       return;
+
+               cf->can_id = hf->can_id;
+
+               cf->can_dlc = get_can_dlc(hf->can_dlc);
+               memcpy(cf->data, hf->data, 8);
+
+               /* ERROR frames tell us information about the controller */
+               if (hf->can_id & CAN_ERR_FLAG)
+                       gs_update_state(dev, cf);
+
+               netdev->stats.rx_packets++;
+               netdev->stats.rx_bytes += hf->can_dlc;
+
+               netif_rx(skb);
+       } else { /* echo_id == hf->echo_id */
+               if (hf->echo_id >= GS_MAX_TX_URBS) {
+                       netdev_err(netdev,
+                                  "Unexpected out of range echo id %d\n",
+                                  hf->echo_id);
+                       goto resubmit_urb;
+               }
+
+               netdev->stats.tx_packets++;
+               netdev->stats.tx_bytes += hf->can_dlc;
+
+               txc = gs_get_tx_context(dev, hf->echo_id);
+
+               /* bad devices send bad echo_ids. */
+               if (!txc) {
+                       netdev_err(netdev,
+                                  "Unexpected unused echo id %d\n",
+                                  hf->echo_id);
+                       goto resubmit_urb;
+               }
+
+               can_get_echo_skb(netdev, hf->echo_id);
+
+               gs_free_tx_context(txc);
+
+               netif_wake_queue(netdev);
+       }
+
+       if (hf->flags & GS_CAN_FLAG_OVERFLOW) {
+               skb = alloc_can_err_skb(netdev, &cf);
+               if (!skb)
+                       goto resubmit_urb;
+
+               cf->can_id |= CAN_ERR_CRTL;
+               cf->can_dlc = CAN_ERR_DLC;
+               cf->data[1] = CAN_ERR_CRTL_RX_OVERFLOW;
+               stats->rx_over_errors++;
+               stats->rx_errors++;
+               netif_rx(skb);
+       }
+
+ resubmit_urb:
+       /* reuse the same buffer and put the URB straight back in flight */
+       usb_fill_bulk_urb(urb,
+                         usbcan->udev,
+                         usb_rcvbulkpipe(usbcan->udev, GSUSB_ENDPOINT_IN),
+                         hf,
+                         sizeof(struct gs_host_frame),
+                         gs_usb_recieve_bulk_callback,
+                         usbcan
+                         );
+
+       rc = usb_submit_urb(urb, GFP_ATOMIC);
+
+       /* USB failure take down all interfaces */
+       if (rc == -ENODEV) {
+               for (rc = 0; rc < GS_MAX_INTF; rc++) {
+                       if (usbcan->canch[rc])
+                               netif_device_detach(usbcan->canch[rc]->netdev);
+               }
+       }
+}
+
+/* Push the bit timing computed by the CAN core to the device via a
+ * GS_USB_BREQ_BITTIMING vendor control request.
+ */
+static int gs_usb_set_bittiming(struct net_device *netdev)
+{
+       struct gs_can *dev = netdev_priv(netdev);
+       struct can_bittiming *bt = &dev->can.bittiming;
+       struct usb_interface *intf = dev->iface;
+       int rc;
+       struct gs_device_bittiming *dbt;
+
+       /* USB transfer buffer must be heap allocated */
+       dbt = kmalloc(sizeof(*dbt), GFP_KERNEL);
+       if (!dbt)
+               return -ENOMEM;
+
+       dbt->prop_seg = bt->prop_seg;
+       dbt->phase_seg1 = bt->phase_seg1;
+       dbt->phase_seg2 = bt->phase_seg2;
+       dbt->sjw = bt->sjw;
+       dbt->brp = bt->brp;
+
+       /* request bit timings */
+       rc = usb_control_msg(interface_to_usbdev(intf),
+                            usb_sndctrlpipe(interface_to_usbdev(intf), 0),
+                            GS_USB_BREQ_BITTIMING,
+                            USB_DIR_OUT|USB_TYPE_VENDOR|USB_RECIP_INTERFACE,
+                            dev->channel,
+                            0,
+                            dbt,
+                            sizeof(*dbt),
+                            1000);
+
+       kfree(dbt);
+
+       if (rc < 0)
+               dev_err(netdev->dev.parent, "Couldn't set bittimings (err=%d)",
+                       rc);
+
+       return rc;
+}
+
+/* Bulk-OUT completion handler: release the coherent transfer buffer,
+ * drop the in-flight counter and restart the queue if it was throttled.
+ * The TX echo itself is completed from the receive path.
+ */
+static void gs_usb_xmit_callback(struct urb *urb)
+{
+       struct gs_tx_context *txc = urb->context;
+       struct gs_can *dev = txc->dev;
+       struct net_device *netdev = dev->netdev;
+
+       if (urb->status)
+               netdev_info(netdev, "usb xmit fail %d\n", txc->echo_id);
+
+       usb_free_coherent(urb->dev,
+                         urb->transfer_buffer_length,
+                         urb->transfer_buffer,
+                         urb->transfer_dma);
+
+       atomic_dec(&dev->active_tx_urbs);
+
+       if (!netif_device_present(netdev))
+               return;
+
+       if (netif_queue_stopped(netdev))
+               netif_wake_queue(netdev);
+}
+
+/* ndo_start_xmit: claim a tx context, copy the CAN frame into a coherent
+ * USB buffer and submit it on the bulk-OUT endpoint.  The skb is parked
+ * with can_put_echo_skb() and completed from the receive path when the
+ * device echoes it back.
+ */
+static netdev_tx_t gs_can_start_xmit(struct sk_buff *skb, struct net_device *netdev)
+{
+       struct gs_can *dev = netdev_priv(netdev);
+       struct net_device_stats *stats = &dev->netdev->stats;
+       struct urb *urb;
+       struct gs_host_frame *hf;
+       struct can_frame *cf;
+       int rc;
+       unsigned int idx;
+       struct gs_tx_context *txc;
+
+       if (can_dropped_invalid_skb(netdev, skb))
+               return NETDEV_TX_OK;
+
+       /* find an empty context to keep track of transmission */
+       txc = gs_alloc_tx_context(dev);
+       if (!txc)
+               return NETDEV_TX_BUSY;
+
+       /* create a URB, and a buffer for it */
+       urb = usb_alloc_urb(0, GFP_ATOMIC);
+       if (!urb) {
+               netdev_err(netdev, "No memory left for URB\n");
+               goto nomem_urb;
+       }
+
+       hf = usb_alloc_coherent(dev->udev, sizeof(*hf), GFP_ATOMIC,
+                               &urb->transfer_dma);
+       if (!hf) {
+               netdev_err(netdev, "No memory left for USB buffer\n");
+               goto nomem_hf;
+       }
+
+       idx = txc->echo_id;
+
+       /* defensive: gs_alloc_tx_context() only hands out ids < GS_MAX_TX_URBS */
+       if (idx >= GS_MAX_TX_URBS) {
+               netdev_err(netdev, "Invalid tx context %d\n", idx);
+               goto badidx;
+       }
+
+       hf->echo_id = idx;
+       hf->channel = dev->channel;
+
+       cf = (struct can_frame *)skb->data;
+
+       hf->can_id = cf->can_id;
+       hf->can_dlc = cf->can_dlc;
+       memcpy(hf->data, cf->data, cf->can_dlc);
+
+       usb_fill_bulk_urb(urb, dev->udev,
+                         usb_sndbulkpipe(dev->udev, GSUSB_ENDPOINT_OUT),
+                         hf,
+                         sizeof(*hf),
+                         gs_usb_xmit_callback,
+                         txc);
+
+       urb->transfer_flags |= URB_NO_TRANSFER_DMA_MAP;
+       usb_anchor_urb(urb, &dev->tx_submitted);
+
+       /* park the skb before submitting; it is unwound on submit failure */
+       can_put_echo_skb(skb, netdev, idx);
+
+       atomic_inc(&dev->active_tx_urbs);
+
+       rc = usb_submit_urb(urb, GFP_ATOMIC);
+       if (unlikely(rc)) {                     /* usb send failed */
+               atomic_dec(&dev->active_tx_urbs);
+
+               can_free_echo_skb(netdev, idx);
+               gs_free_tx_context(txc);
+
+               usb_unanchor_urb(urb);
+               usb_free_coherent(dev->udev,
+                                 sizeof(*hf),
+                                 hf,
+                                 urb->transfer_dma);
+
+
+               if (rc == -ENODEV) {
+                       netif_device_detach(netdev);
+               } else {
+                       netdev_err(netdev, "usb_submit failed (err=%d)\n", rc);
+                       stats->tx_dropped++;
+               }
+       } else {
+               /* Slow down tx path */
+               if (atomic_read(&dev->active_tx_urbs) >= GS_MAX_TX_URBS)
+                       netif_stop_queue(netdev);
+       }
+
+       /* let usb core take care of this urb */
+       usb_free_urb(urb);
+
+       return NETDEV_TX_OK;
+
+ badidx:
+       usb_free_coherent(dev->udev,
+                         sizeof(*hf),
+                         hf,
+                         urb->transfer_dma);
+ nomem_hf:
+       usb_free_urb(urb);
+
+ nomem_urb:
+       gs_free_tx_context(txc);
+       dev_kfree_skb(skb);
+       stats->tx_dropped++;
+       return NETDEV_TX_OK;
+}
+
+/* ndo_open: on the first channel of the device, allocate and submit the
+ * shared pool of RX URBs; then send a GS_USB_BREQ_MODE request translating
+ * the configured ctrlmode flags, and start the queue.
+ */
+static int gs_can_open(struct net_device *netdev)
+{
+       struct gs_can *dev = netdev_priv(netdev);
+       struct gs_usb *parent = dev->parent;
+       int rc, i;
+       struct gs_device_mode *dm;
+       u32 ctrlmode;
+
+       rc = open_candev(netdev);
+       if (rc)
+               return rc;
+
+       /* NOTE(review): the -ENOMEM returns below leave the candev open and
+        * active_channels incremented — confirm whether a rollback path is
+        * needed here.
+        */
+       if (atomic_add_return(1, &parent->active_channels) == 1) {
+               for (i = 0; i < GS_MAX_RX_URBS; i++) {
+                       struct urb *urb;
+                       u8 *buf;
+
+                       /* alloc rx urb */
+                       urb = usb_alloc_urb(0, GFP_KERNEL);
+                       if (!urb) {
+                               netdev_err(netdev,
+                                          "No memory left for URB\n");
+                               return -ENOMEM;
+                       }
+
+                       /* alloc rx buffer */
+                       buf = usb_alloc_coherent(dev->udev,
+                                                sizeof(struct gs_host_frame),
+                                                GFP_KERNEL,
+                                                &urb->transfer_dma);
+                       if (!buf) {
+                               netdev_err(netdev,
+                                          "No memory left for USB buffer\n");
+                               usb_free_urb(urb);
+                               return -ENOMEM;
+                       }
+
+                       /* fill, anchor, and submit rx urb */
+                       usb_fill_bulk_urb(urb,
+                                         dev->udev,
+                                         usb_rcvbulkpipe(dev->udev,
+                                                         GSUSB_ENDPOINT_IN),
+                                         buf,
+                                         sizeof(struct gs_host_frame),
+                                         gs_usb_recieve_bulk_callback,
+                                         parent);
+                       urb->transfer_flags |= URB_NO_TRANSFER_DMA_MAP;
+
+                       usb_anchor_urb(urb, &parent->rx_submitted);
+
+                       rc = usb_submit_urb(urb, GFP_KERNEL);
+                       if (rc) {
+                               if (rc == -ENODEV)
+                                       netif_device_detach(dev->netdev);
+
+                               netdev_err(netdev,
+                                          "usb_submit failed (err=%d)\n",
+                                          rc);
+
+                               /* fix: free the unsubmitted urb and its
+                                * buffer; they were previously leaked
+                                */
+                               usb_unanchor_urb(urb);
+                               usb_free_coherent(dev->udev,
+                                                 sizeof(struct gs_host_frame),
+                                                 buf,
+                                                 urb->transfer_dma);
+                               usb_free_urb(urb);
+                               break;
+                       }
+
+                       /* Drop reference,
+                        * USB core will take care of freeing it
+                        */
+                       usb_free_urb(urb);
+               }
+       }
+
+       dm = kmalloc(sizeof(*dm), GFP_KERNEL);
+       if (!dm)
+               return -ENOMEM;
+
+       /* flags */
+       ctrlmode = dev->can.ctrlmode;
+       dm->flags = 0;
+
+       if (ctrlmode & CAN_CTRLMODE_LOOPBACK)
+               dm->flags |= GS_CAN_MODE_LOOP_BACK;
+       else if (ctrlmode & CAN_CTRLMODE_LISTENONLY)
+               dm->flags |= GS_CAN_MODE_LISTEN_ONLY;
+
+       /* Controller is not allowed to retry TX
+        * this mode is unavailable on atmels uc3c hardware
+        */
+       if (ctrlmode & CAN_CTRLMODE_ONE_SHOT)
+               dm->flags |= GS_CAN_MODE_ONE_SHOT;
+
+       if (ctrlmode & CAN_CTRLMODE_3_SAMPLES)
+               dm->flags |= GS_CAN_MODE_TRIPLE_SAMPLE;
+
+       /* finally start device */
+       dm->mode = GS_CAN_MODE_START;
+       rc = usb_control_msg(interface_to_usbdev(dev->iface),
+                            usb_sndctrlpipe(interface_to_usbdev(dev->iface), 0),
+                            GS_USB_BREQ_MODE,
+                            USB_DIR_OUT|USB_TYPE_VENDOR|USB_RECIP_INTERFACE,
+                            dev->channel,
+                            0,
+                            dm,
+                            sizeof(*dm),
+                            1000);
+
+       if (rc < 0) {
+               netdev_err(netdev, "Couldn't start device (err=%d)\n", rc);
+               kfree(dm);
+               return rc;
+       }
+
+       kfree(dm);
+
+       dev->can.state = CAN_STATE_ERROR_ACTIVE;
+
+       if (!(dev->can.ctrlmode & CAN_CTRLMODE_LISTENONLY))
+               netif_start_queue(netdev);
+
+       return 0;
+}
+
+static int gs_can_close(struct net_device *netdev)
+{
+       int rc;
+       struct gs_can *dev = netdev_priv(netdev);
+       struct gs_usb *parent = dev->parent;
+
+       netif_stop_queue(netdev);
+
+       /* Stop polling */
+       if (atomic_dec_and_test(&parent->active_channels))
+               usb_kill_anchored_urbs(&parent->rx_submitted);
+
+       /* Stop sending URBs */
+       usb_kill_anchored_urbs(&dev->tx_submitted);
+       atomic_set(&dev->active_tx_urbs, 0);
+
+       /* reset the device */
+       rc = gs_cmd_reset(parent, dev);
+       if (rc < 0)
+               netdev_warn(netdev, "Couldn't shutdown device (err=%d)", rc);
+
+       /* reset tx contexts */
+       for (rc = 0; rc < GS_MAX_TX_URBS; rc++) {
+               dev->tx_context[rc].dev = dev;
+               dev->tx_context[rc].echo_id = GS_MAX_TX_URBS;
+       }
+
+       /* close the netdev */
+       close_candev(netdev);
+
+       return 0;
+}
+
+static const struct net_device_ops gs_usb_netdev_ops = {
+       .ndo_open = gs_can_open,
+       .ndo_stop = gs_can_close,
+       .ndo_start_xmit = gs_can_start_xmit,
+};
+
+static struct gs_can *gs_make_candev(unsigned int channel, struct usb_interface *intf)
+{
+       struct gs_can *dev;
+       struct net_device *netdev;
+       int rc;
+       struct gs_device_bt_const *bt_const;
+
+       bt_const = kmalloc(sizeof(*bt_const), GFP_KERNEL);
+       if (!bt_const)
+               return ERR_PTR(-ENOMEM);
+
+       /* fetch bit timing constants */
+       rc = usb_control_msg(interface_to_usbdev(intf),
+                            usb_rcvctrlpipe(interface_to_usbdev(intf), 0),
+                            GS_USB_BREQ_BT_CONST,
+                            USB_DIR_IN|USB_TYPE_VENDOR|USB_RECIP_INTERFACE,
+                            channel,
+                            0,
+                            bt_const,
+                            sizeof(*bt_const),
+                            1000);
+
+       if (rc < 0) {
+               dev_err(&intf->dev,
+                       "Couldn't get bit timing const for channel (err=%d)\n",
+                       rc);
+               kfree(bt_const);
+               return ERR_PTR(rc);
+       }
+
+       /* create netdev */
+       netdev = alloc_candev(sizeof(struct gs_can), GS_MAX_TX_URBS);
+       if (!netdev) {
+               dev_err(&intf->dev, "Couldn't allocate candev\n");
+               kfree(bt_const);
+               return ERR_PTR(-ENOMEM);
+       }
+
+       dev = netdev_priv(netdev);
+
+       netdev->netdev_ops = &gs_usb_netdev_ops;
+
+       netdev->flags |= IFF_ECHO; /* we support full roundtrip echo */
+
+       /* dev settup */
+       strcpy(dev->bt_const.name, "gs_usb");
+       dev->bt_const.tseg1_min = bt_const->tseg1_min;
+       dev->bt_const.tseg1_max = bt_const->tseg1_max;
+       dev->bt_const.tseg2_min = bt_const->tseg2_min;
+       dev->bt_const.tseg2_max = bt_const->tseg2_max;
+       dev->bt_const.sjw_max = bt_const->sjw_max;
+       dev->bt_const.brp_min = bt_const->brp_min;
+       dev->bt_const.brp_max = bt_const->brp_max;
+       dev->bt_const.brp_inc = bt_const->brp_inc;
+
+       dev->udev = interface_to_usbdev(intf);
+       dev->iface = intf;
+       dev->netdev = netdev;
+       dev->channel = channel;
+
+       init_usb_anchor(&dev->tx_submitted);
+       atomic_set(&dev->active_tx_urbs, 0);
+       spin_lock_init(&dev->tx_ctx_lock);
+       for (rc = 0; rc < GS_MAX_TX_URBS; rc++) {
+               dev->tx_context[rc].dev = dev;
+               dev->tx_context[rc].echo_id = GS_MAX_TX_URBS;
+       }
+
+       /* can settup */
+       dev->can.state = CAN_STATE_STOPPED;
+       dev->can.clock.freq = bt_const->fclk_can;
+       dev->can.bittiming_const = &dev->bt_const;
+       dev->can.do_set_bittiming = gs_usb_set_bittiming;
+
+       dev->can.ctrlmode_supported = 0;
+
+       if (bt_const->feature & GS_CAN_FEATURE_LISTEN_ONLY)
+               dev->can.ctrlmode_supported |= CAN_CTRLMODE_LISTENONLY;
+
+       if (bt_const->feature & GS_CAN_FEATURE_LOOP_BACK)
+               dev->can.ctrlmode_supported |= CAN_CTRLMODE_LOOPBACK;
+
+       if (bt_const->feature & GS_CAN_FEATURE_TRIPLE_SAMPLE)
+               dev->can.ctrlmode_supported |= CAN_CTRLMODE_3_SAMPLES;
+
+       if (bt_const->feature & GS_CAN_FEATURE_ONE_SHOT)
+               dev->can.ctrlmode_supported |= CAN_CTRLMODE_ONE_SHOT;
+
+       kfree(bt_const);
+
+       SET_NETDEV_DEV(netdev, &intf->dev);
+
+       rc = register_candev(dev->netdev);
+       if (rc) {
+               free_candev(dev->netdev);
+               dev_err(&intf->dev, "Couldn't register candev (err=%d)\n", rc);
+               return ERR_PTR(rc);
+       }
+
+       return dev;
+}
+
+static void gs_destroy_candev(struct gs_can *dev)
+{
+       unregister_candev(dev->netdev);
+       free_candev(dev->netdev);
+       usb_kill_anchored_urbs(&dev->tx_submitted);
+       kfree(dev);
+}
+
+static int gs_usb_probe(struct usb_interface *intf, const struct usb_device_id *id)
+{
+       struct gs_usb *dev;
+       int rc = -ENOMEM;
+       unsigned int icount, i;
+       struct gs_host_config *hconf;
+       struct gs_device_config *dconf;
+
+       hconf = kmalloc(sizeof(*hconf), GFP_KERNEL);
+       if (!hconf)
+               return -ENOMEM;
+
+       hconf->byte_order = 0x0000beef;
+
+       /* send host config */
+       rc = usb_control_msg(interface_to_usbdev(intf),
+                            usb_sndctrlpipe(interface_to_usbdev(intf), 0),
+                            GS_USB_BREQ_HOST_FORMAT,
+                            USB_DIR_OUT|USB_TYPE_VENDOR|USB_RECIP_INTERFACE,
+                            1,
+                            intf->altsetting[0].desc.bInterfaceNumber,
+                            hconf,
+                            sizeof(*hconf),
+                            1000);
+
+       kfree(hconf);
+
+       if (rc < 0) {
+               dev_err(&intf->dev, "Couldn't send data format (err=%d)\n",
+                       rc);
+               return rc;
+       }
+
+       dconf = kmalloc(sizeof(*dconf), GFP_KERNEL);
+       if (!dconf)
+               return -ENOMEM;
+
+       /* read device config */
+       rc = usb_control_msg(interface_to_usbdev(intf),
+                            usb_rcvctrlpipe(interface_to_usbdev(intf), 0),
+                            GS_USB_BREQ_DEVICE_CONFIG,
+                            USB_DIR_IN|USB_TYPE_VENDOR|USB_RECIP_INTERFACE,
+                            1,
+                            intf->altsetting[0].desc.bInterfaceNumber,
+                            dconf,
+                            sizeof(*dconf),
+                            1000);
+       if (rc < 0) {
+               dev_err(&intf->dev, "Couldn't get device config: (err=%d)\n",
+                       rc);
+
+               kfree(dconf);
+
+               return rc;
+       }
+
+       icount = dconf->icount+1;
+
+       kfree(dconf);
+
+       dev_info(&intf->dev, "Configuring for %d interfaces\n", icount);
+
+       if (icount > GS_MAX_INTF) {
+               dev_err(&intf->dev,
+                       "Driver cannot handle more that %d CAN interfaces\n",
+                       GS_MAX_INTF);
+               return -EINVAL;
+       }
+
+       dev = kzalloc(sizeof(*dev), GFP_KERNEL);
+       init_usb_anchor(&dev->rx_submitted);
+
+       atomic_set(&dev->active_channels, 0);
+
+       usb_set_intfdata(intf, dev);
+       dev->udev = interface_to_usbdev(intf);
+
+       for (i = 0; i < icount; i++) {
+               dev->canch[i] = gs_make_candev(i, intf);
+               if (IS_ERR_OR_NULL(dev->canch[i])) {
+                       /* on failure destroy previously created candevs */
+                       icount = i;
+                       for (i = 0; i < icount; i++) {
+                               gs_destroy_candev(dev->canch[i]);
+                               dev->canch[i] = NULL;
+                       }
+                       kfree(dev);
+                       return rc;
+               }
+               dev->canch[i]->parent = dev;
+       }
+
+       return 0;
+}
+
+static void gs_usb_disconnect(struct usb_interface *intf)
+{
+       unsigned i;
+       struct gs_usb *dev = usb_get_intfdata(intf);
+       usb_set_intfdata(intf, NULL);
+
+       if (!dev) {
+               dev_err(&intf->dev, "Disconnect (nodata)\n");
+               return;
+       }
+
+       for (i = 0; i < GS_MAX_INTF; i++) {
+               struct gs_can *can = dev->canch[i];
+
+               if (!can)
+                       continue;
+
+               gs_destroy_candev(can);
+       }
+
+       usb_kill_anchored_urbs(&dev->rx_submitted);
+}
+
+static const struct usb_device_id gs_usb_table[] = {
+       {USB_DEVICE(USB_GSUSB_1_VENDOR_ID, USB_GSUSB_1_PRODUCT_ID)},
+       {} /* Terminating entry */
+};
+
+MODULE_DEVICE_TABLE(usb, gs_usb_table);
+
+static struct usb_driver gs_usb_driver = {
+       .name       = "gs_usb",
+       .probe      = gs_usb_probe,
+       .disconnect = gs_usb_disconnect,
+       .id_table   = gs_usb_table,
+};
+
+module_usb_driver(gs_usb_driver);
+
+MODULE_AUTHOR("Maximilian Schneider <mws@schneidersoft.net>");
+MODULE_DESCRIPTION(
+"Socket CAN device driver for Geschwister Schneider Technologie-, "
+"Entwicklungs- und Vertriebs UG. USB2.0 to CAN interfaces.");
+MODULE_LICENSE("GPL v2");
index 4ca46edc061d761169a85fc58937dee449a4776f..541fb7a05625aaf889089704ec155956629e77c8 100644 (file)
@@ -53,6 +53,8 @@
 #define USB_OEM_MERCURY_PRODUCT_ID     34
 #define USB_OEM_LEAF_PRODUCT_ID                35
 #define USB_CAN_R_PRODUCT_ID           39
+#define USB_LEAF_LITE_V2_PRODUCT_ID    288
+#define USB_MINI_PCIE_HS_PRODUCT_ID    289
 
 /* USB devices features */
 #define KVASER_HAS_SILENT_MODE         BIT(0)
@@ -356,6 +358,8 @@ static const struct usb_device_id kvaser_usb_table[] = {
                .driver_info = KVASER_HAS_TXRX_ERRORS },
        { USB_DEVICE(KVASER_VENDOR_ID, USB_CAN_R_PRODUCT_ID),
                .driver_info = KVASER_HAS_TXRX_ERRORS },
+       { USB_DEVICE(KVASER_VENDOR_ID, USB_LEAF_LITE_V2_PRODUCT_ID) },
+       { USB_DEVICE(KVASER_VENDOR_ID, USB_MINI_PCIE_HS_PRODUCT_ID) },
        { }
 };
 MODULE_DEVICE_TABLE(usb, kvaser_usb_table);
@@ -379,38 +383,43 @@ static int kvaser_usb_wait_msg(const struct kvaser_usb *dev, u8 id,
        void *buf;
        int actual_len;
        int err;
-       int pos = 0;
+       int pos;
+       unsigned long to = jiffies + msecs_to_jiffies(USB_RECV_TIMEOUT);
 
        buf = kzalloc(RX_BUFFER_SIZE, GFP_KERNEL);
        if (!buf)
                return -ENOMEM;
 
-       err = usb_bulk_msg(dev->udev,
-                          usb_rcvbulkpipe(dev->udev,
-                                          dev->bulk_in->bEndpointAddress),
-                          buf, RX_BUFFER_SIZE, &actual_len,
-                          USB_RECV_TIMEOUT);
-       if (err < 0)
-               goto end;
+       do {
+               err = usb_bulk_msg(dev->udev,
+                                  usb_rcvbulkpipe(dev->udev,
+                                       dev->bulk_in->bEndpointAddress),
+                                  buf, RX_BUFFER_SIZE, &actual_len,
+                                  USB_RECV_TIMEOUT);
+               if (err < 0)
+                       goto end;
 
-       while (pos <= actual_len - MSG_HEADER_LEN) {
-               tmp = buf + pos;
+               pos = 0;
+               while (pos <= actual_len - MSG_HEADER_LEN) {
+                       tmp = buf + pos;
 
-               if (!tmp->len)
-                       break;
+                       if (!tmp->len)
+                               break;
 
-               if (pos + tmp->len > actual_len) {
-                       dev_err(dev->udev->dev.parent, "Format error\n");
-                       break;
-               }
+                       if (pos + tmp->len > actual_len) {
+                               dev_err(dev->udev->dev.parent,
+                                       "Format error\n");
+                               break;
+                       }
 
-               if (tmp->id == id) {
-                       memcpy(msg, tmp, tmp->len);
-                       goto end;
-               }
+                       if (tmp->id == id) {
+                               memcpy(msg, tmp, tmp->len);
+                               goto end;
+                       }
 
-               pos += tmp->len;
-       }
+                       pos += tmp->len;
+               }
+       } while (time_before(jiffies, to));
 
        err = -EINVAL;
 
index 41ee5b6ae91751239d81e4613f336aff02fbff84..69c42513dd724bb06ef40eaf4b5e4bbea800936d 100644 (file)
@@ -289,7 +289,7 @@ static int mv88e6123_61_65_setup_port(struct dsa_switch *ds, int p)
 
 static int mv88e6123_61_65_setup(struct dsa_switch *ds)
 {
-       struct mv88e6xxx_priv_state *ps = (void *)(ds + 1);
+       struct mv88e6xxx_priv_state *ps = ds_to_priv(ds);
        int i;
        int ret;
 
index dadfafba64e9ac0aad3de55d84e5bdea3ceb00a0..953bc6a49e594471342270e7281a8e263f0086de 100644 (file)
@@ -155,7 +155,7 @@ static int mv88e6131_setup_global(struct dsa_switch *ds)
 
 static int mv88e6131_setup_port(struct dsa_switch *ds, int p)
 {
-       struct mv88e6xxx_priv_state *ps = (void *)(ds + 1);
+       struct mv88e6xxx_priv_state *ps = ds_to_priv(ds);
        int addr = REG_PORT(p);
        u16 val;
 
@@ -274,7 +274,7 @@ static int mv88e6131_setup_port(struct dsa_switch *ds, int p)
 
 static int mv88e6131_setup(struct dsa_switch *ds)
 {
-       struct mv88e6xxx_priv_state *ps = (void *)(ds + 1);
+       struct mv88e6xxx_priv_state *ps = ds_to_priv(ds);
        int i;
        int ret;
 
index 17314ed9456d32c961aea31db10a650a2ffb1392..9ce2146346b6cda7175229d0f493f004730ab1b1 100644 (file)
@@ -74,7 +74,7 @@ int __mv88e6xxx_reg_read(struct mii_bus *bus, int sw_addr, int addr, int reg)
 
 int mv88e6xxx_reg_read(struct dsa_switch *ds, int addr, int reg)
 {
-       struct mv88e6xxx_priv_state *ps = (void *)(ds + 1);
+       struct mv88e6xxx_priv_state *ps = ds_to_priv(ds);
        int ret;
 
        mutex_lock(&ps->smi_mutex);
@@ -118,7 +118,7 @@ int __mv88e6xxx_reg_write(struct mii_bus *bus, int sw_addr, int addr,
 
 int mv88e6xxx_reg_write(struct dsa_switch *ds, int addr, int reg, u16 val)
 {
-       struct mv88e6xxx_priv_state *ps = (void *)(ds + 1);
+       struct mv88e6xxx_priv_state *ps = ds_to_priv(ds);
        int ret;
 
        mutex_lock(&ps->smi_mutex);
@@ -256,7 +256,7 @@ static void mv88e6xxx_ppu_reenable_timer(unsigned long _ps)
 
 static int mv88e6xxx_ppu_access_get(struct dsa_switch *ds)
 {
-       struct mv88e6xxx_priv_state *ps = (void *)(ds + 1);
+       struct mv88e6xxx_priv_state *ps = ds_to_priv(ds);
        int ret;
 
        mutex_lock(&ps->ppu_mutex);
@@ -283,7 +283,7 @@ static int mv88e6xxx_ppu_access_get(struct dsa_switch *ds)
 
 static void mv88e6xxx_ppu_access_put(struct dsa_switch *ds)
 {
-       struct mv88e6xxx_priv_state *ps = (void *)(ds + 1);
+       struct mv88e6xxx_priv_state *ps = ds_to_priv(ds);
 
        /* Schedule a timer to re-enable the PHY polling unit. */
        mod_timer(&ps->ppu_timer, jiffies + msecs_to_jiffies(10));
@@ -292,7 +292,7 @@ static void mv88e6xxx_ppu_access_put(struct dsa_switch *ds)
 
 void mv88e6xxx_ppu_state_init(struct dsa_switch *ds)
 {
-       struct mv88e6xxx_priv_state *ps = (void *)(ds + 1);
+       struct mv88e6xxx_priv_state *ps = ds_to_priv(ds);
 
        mutex_init(&ps->ppu_mutex);
        INIT_WORK(&ps->ppu_work, mv88e6xxx_ppu_reenable_work);
@@ -463,7 +463,7 @@ void mv88e6xxx_get_ethtool_stats(struct dsa_switch *ds,
                                 int nr_stats, struct mv88e6xxx_hw_stat *stats,
                                 int port, uint64_t *data)
 {
-       struct mv88e6xxx_priv_state *ps = (void *)(ds + 1);
+       struct mv88e6xxx_priv_state *ps = ds_to_priv(ds);
        int ret;
        int i;
 
index 35df0b9e6848b0f1bd964f7b93ed2f18988c6b08..a968654b631d28a860dcd83b8c9b728189c196b2 100644 (file)
@@ -534,7 +534,7 @@ static int el3_common_init(struct net_device *dev)
        /* The EL3-specific entries in the device structure. */
        dev->netdev_ops = &netdev_ops;
        dev->watchdog_timeo = TX_TIMEOUT;
-       SET_ETHTOOL_OPS(dev, &ethtool_ops);
+       dev->ethtool_ops = &ethtool_ops;
 
        err = register_netdev(dev);
        if (err) {
index 063557e037f21b8b43fa3d1dfe64a19c870d4789..f18647c2355990ba0e55b6a2690c0cdbb819da43 100644 (file)
@@ -218,7 +218,7 @@ static int tc589_probe(struct pcmcia_device *link)
        dev->netdev_ops = &el3_netdev_ops;
        dev->watchdog_timeo = TX_TIMEOUT;
 
-       SET_ETHTOOL_OPS(dev, &netdev_ethtool_ops);
+       dev->ethtool_ops = &netdev_ethtool_ops;
 
        return tc589_config(link);
 }
index 465cc7108d8a5e5bbb2c34df44f7bd0329d02d6b..e13b04624dedcecb7285943787946f2dfadd610c 100644 (file)
@@ -2435,7 +2435,7 @@ typhoon_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
        netif_napi_add(dev, &tp->napi, typhoon_poll, 16);
        dev->watchdog_timeo     = TX_TIMEOUT;
 
-       SET_ETHTOOL_OPS(dev, &typhoon_ethtool_ops);
+       dev->ethtool_ops = &typhoon_ethtool_ops;
 
        /* We can handle scatter gather, up to 16 entries, and
         * we can do IP checksumming (only version 4, doh...)
index 455d4c399b52168ce6f8691160991e500216ff54..1d162ccb473341af256279b29d466bc56429472b 100644 (file)
@@ -157,7 +157,7 @@ static void ax_reset_8390(struct net_device *dev)
 
        /* This check _should_not_ be necessary, omit eventually. */
        while ((ei_inb(addr + EN0_ISR) & ENISR_RESET) == 0) {
-               if (jiffies - reset_start_time > 2 * HZ / 100) {
+               if (time_after(jiffies, reset_start_time + 2 * HZ / 100)) {
                        netdev_warn(dev, "%s: did not complete.\n", __func__);
                        break;
                }
@@ -293,7 +293,7 @@ static void ax_block_output(struct net_device *dev, int count,
        dma_start = jiffies;
 
        while ((ei_inb(nic_base + EN0_ISR) & ENISR_RDC) == 0) {
-               if (jiffies - dma_start > 2 * HZ / 100) {               /* 20ms */
+               if (time_after(jiffies, dma_start + 2 * HZ / 100)) { /* 20ms */
                        netdev_warn(dev, "timeout waiting for Tx RDC.\n");
                        ax_reset_8390(dev);
                        ax_NS8390_init(dev, 1);
index 39b26fe28d1051ff916faceb747da7a64dac711f..d7401017a3f10940f3a662bebc555d835be3ce4b 100644 (file)
@@ -35,6 +35,18 @@ source "drivers/net/ethernet/calxeda/Kconfig"
 source "drivers/net/ethernet/chelsio/Kconfig"
 source "drivers/net/ethernet/cirrus/Kconfig"
 source "drivers/net/ethernet/cisco/Kconfig"
+
+config CX_ECAT
+       tristate "Beckhoff CX5020 EtherCAT master support"
+       depends on PCI
+       ---help---
+         Driver for EtherCAT master module located on CCAT FPGA
+         that can be found on Beckhoff CX5020, and possibly other of CX
+         Beckhoff CX series industrial PCs.
+
+         To compile this driver as a module, choose M here. The module
+         will be called ec_bhf.
+
 source "drivers/net/ethernet/davicom/Kconfig"
 
 config DNET
index 545d0b3b9cb422b2fefa7122b074cd869a9085c2..35190e36c4568e6803279f878a6aa866685ca1be 100644 (file)
@@ -21,6 +21,7 @@ obj-$(CONFIG_NET_CALXEDA_XGMAC) += calxeda/
 obj-$(CONFIG_NET_VENDOR_CHELSIO) += chelsio/
 obj-$(CONFIG_NET_VENDOR_CIRRUS) += cirrus/
 obj-$(CONFIG_NET_VENDOR_CISCO) += cisco/
+obj-$(CONFIG_CX_ECAT) += ec_bhf.o
 obj-$(CONFIG_DM9000) += davicom/
 obj-$(CONFIG_DNET) += dnet.o
 obj-$(CONFIG_NET_VENDOR_DEC) += dec/
index 171d73c1d3c22de3209ca6c48b7978c35b2a38b0..40dbbf740331c49b13ac0d64ef977be6ce361993 100644 (file)
@@ -784,7 +784,7 @@ static int starfire_init_one(struct pci_dev *pdev,
 
        dev->netdev_ops = &netdev_ops;
        dev->watchdog_timeo = TX_TIMEOUT;
-       SET_ETHTOOL_OPS(dev, &ethtool_ops);
+       dev->ethtool_ops = &ethtool_ops;
 
        netif_napi_add(dev, &np->napi, netdev_poll, max_interrupt_work);
 
index 1517e9df5ba16c66f6f63bc10f8a0e1b460a95a3..9a6991be9749f75b4107d979cd31518cda837ec0 100644 (file)
@@ -476,7 +476,7 @@ static int acenic_probe_one(struct pci_dev *pdev,
        dev->watchdog_timeo = 5*HZ;
 
        dev->netdev_ops = &ace_netdev_ops;
-       SET_ETHTOOL_OPS(dev, &ace_ethtool_ops);
+       dev->ethtool_ops = &ace_ethtool_ops;
 
        /* we only display this string ONCE */
        if (!boards_found)
index 80c1ab74a4b8f36483ebe710aa56ffb3879d52bc..fdddba51473efce74edbf9c0befae1ebdfbe1824 100644 (file)
@@ -1,5 +1,6 @@
 config ALTERA_TSE
        tristate "Altera Triple-Speed Ethernet MAC support"
+       depends on HAS_DMA
        select PHYLIB
        ---help---
          This driver supports the Altera Triple-Speed (TSE) Ethernet MAC.
index 3df18669ea306580994cb877044a000aa4b7e6f9..38c500f95b9e97249dfd7b1e4b14dab0d6a12816 100644 (file)
@@ -18,6 +18,7 @@
 #include "altera_utils.h"
 #include "altera_tse.h"
 #include "altera_msgdmahw.h"
+#include "altera_msgdma.h"
 
 /* No initialization work to do for MSGDMA */
 int msgdma_initialize(struct altera_tse_private *priv)
@@ -29,13 +30,15 @@ void msgdma_uninitialize(struct altera_tse_private *priv)
 {
 }
 
+void msgdma_start_rxdma(struct altera_tse_private *priv)
+{
+}
+
 void msgdma_reset(struct altera_tse_private *priv)
 {
        int counter;
-       struct msgdma_csr *txcsr =
-               (struct msgdma_csr *)priv->tx_dma_csr;
-       struct msgdma_csr *rxcsr =
-               (struct msgdma_csr *)priv->rx_dma_csr;
+       struct msgdma_csr *txcsr = priv->tx_dma_csr;
+       struct msgdma_csr *rxcsr = priv->rx_dma_csr;
 
        /* Reset Rx mSGDMA */
        iowrite32(MSGDMA_CSR_STAT_MASK, &rxcsr->status);
@@ -133,8 +136,7 @@ u32 msgdma_tx_completions(struct altera_tse_private *priv)
        u32 ready = 0;
        u32 inuse;
        u32 status;
-       struct msgdma_csr *txcsr =
-               (struct msgdma_csr *)priv->tx_dma_csr;
+       struct msgdma_csr *txcsr = priv->tx_dma_csr;
 
        /* Get number of sent descriptors */
        inuse = ioread32(&txcsr->rw_fill_level) & 0xffff;
@@ -154,7 +156,7 @@ u32 msgdma_tx_completions(struct altera_tse_private *priv)
 
 /* Put buffer to the mSGDMA RX FIFO
  */
-int msgdma_add_rx_desc(struct altera_tse_private *priv,
+void msgdma_add_rx_desc(struct altera_tse_private *priv,
                        struct tse_buffer *rxbuffer)
 {
        struct msgdma_extended_desc *desc = priv->rx_dma_desc;
@@ -175,7 +177,6 @@ int msgdma_add_rx_desc(struct altera_tse_private *priv,
        iowrite32(0, &desc->burst_seq_num);
        iowrite32(0x00010001, &desc->stride);
        iowrite32(control, &desc->control);
-       return 1;
 }
 
 /* status is returned on upper 16 bits,
@@ -186,10 +187,8 @@ u32 msgdma_rx_status(struct altera_tse_private *priv)
        u32 rxstatus = 0;
        u32 pktlength;
        u32 pktstatus;
-       struct msgdma_csr *rxcsr =
-               (struct msgdma_csr *)priv->rx_dma_csr;
-       struct msgdma_response *rxresp =
-               (struct msgdma_response *)priv->rx_dma_resp;
+       struct msgdma_csr *rxcsr = priv->rx_dma_csr;
+       struct msgdma_response *rxresp = priv->rx_dma_resp;
 
        if (ioread32(&rxcsr->resp_fill_level) & 0xffff) {
                pktlength = ioread32(&rxresp->bytes_transferred);
index 7f0f5bf2bba2f42952ea69307c44a334f6784b59..42cf61c81057be8f4c3f2213dfdd9e89de7dee15 100644 (file)
@@ -25,10 +25,11 @@ void msgdma_disable_txirq(struct altera_tse_private *);
 void msgdma_clear_rxirq(struct altera_tse_private *);
 void msgdma_clear_txirq(struct altera_tse_private *);
 u32 msgdma_tx_completions(struct altera_tse_private *);
-int msgdma_add_rx_desc(struct altera_tse_private *, struct tse_buffer *);
+void msgdma_add_rx_desc(struct altera_tse_private *, struct tse_buffer *);
 int msgdma_tx_buffer(struct altera_tse_private *, struct tse_buffer *);
 u32 msgdma_rx_status(struct altera_tse_private *);
 int msgdma_initialize(struct altera_tse_private *);
 void msgdma_uninitialize(struct altera_tse_private *);
+void msgdma_start_rxdma(struct altera_tse_private *);
 
 #endif /*  __ALTERA_MSGDMA_H__ */
index 0ee96639ae44e7a238ac36682868443caa7c43c3..dbd40e15b5ccc291325cea972a0c250cc5a84a0e 100644 (file)
 #include "altera_sgdmahw.h"
 #include "altera_sgdma.h"
 
-static void sgdma_descrip(struct sgdma_descrip *desc,
-                         struct sgdma_descrip *ndesc,
-                         dma_addr_t ndesc_phys,
-                         dma_addr_t raddr,
-                         dma_addr_t waddr,
-                         u16 length,
-                         int generate_eop,
-                         int rfixed,
-                         int wfixed);
+static void sgdma_setup_descrip(struct sgdma_descrip *desc,
+                               struct sgdma_descrip *ndesc,
+                               dma_addr_t ndesc_phys,
+                               dma_addr_t raddr,
+                               dma_addr_t waddr,
+                               u16 length,
+                               int generate_eop,
+                               int rfixed,
+                               int wfixed);
 
 static int sgdma_async_write(struct altera_tse_private *priv,
                              struct sgdma_descrip *desc);
@@ -64,11 +64,15 @@ queue_rx_peekhead(struct altera_tse_private *priv);
 
 int sgdma_initialize(struct altera_tse_private *priv)
 {
-       priv->txctrlreg = SGDMA_CTRLREG_ILASTD;
+       priv->txctrlreg = SGDMA_CTRLREG_ILASTD |
+                     SGDMA_CTRLREG_INTEN;
 
        priv->rxctrlreg = SGDMA_CTRLREG_IDESCRIP |
+                     SGDMA_CTRLREG_INTEN |
                      SGDMA_CTRLREG_ILASTD;
 
+       priv->sgdmadesclen = sizeof(struct sgdma_descrip);
+
        INIT_LIST_HEAD(&priv->txlisthd);
        INIT_LIST_HEAD(&priv->rxlisthd);
 
@@ -93,6 +97,16 @@ int sgdma_initialize(struct altera_tse_private *priv)
                return -EINVAL;
        }
 
+       /* Initialize descriptor memory to all 0's, sync memory to cache */
+       memset(priv->tx_dma_desc, 0, priv->txdescmem);
+       memset(priv->rx_dma_desc, 0, priv->rxdescmem);
+
+       dma_sync_single_for_device(priv->device, priv->txdescphys,
+                                  priv->txdescmem, DMA_TO_DEVICE);
+
+       dma_sync_single_for_device(priv->device, priv->rxdescphys,
+                                  priv->rxdescmem, DMA_TO_DEVICE);
+
        return 0;
 }
 
@@ -112,12 +126,12 @@ void sgdma_uninitialize(struct altera_tse_private *priv)
  */
 void sgdma_reset(struct altera_tse_private *priv)
 {
-       u32 *ptxdescripmem = (u32 *)priv->tx_dma_desc;
+       u32 *ptxdescripmem = priv->tx_dma_desc;
        u32 txdescriplen   = priv->txdescmem;
-       u32 *prxdescripmem = (u32 *)priv->rx_dma_desc;
+       u32 *prxdescripmem = priv->rx_dma_desc;
        u32 rxdescriplen   = priv->rxdescmem;
-       struct sgdma_csr *ptxsgdma = (struct sgdma_csr *)priv->tx_dma_csr;
-       struct sgdma_csr *prxsgdma = (struct sgdma_csr *)priv->rx_dma_csr;
+       struct sgdma_csr *ptxsgdma = priv->tx_dma_csr;
+       struct sgdma_csr *prxsgdma = priv->rx_dma_csr;
 
        /* Initialize descriptor memory to 0 */
        memset(ptxdescripmem, 0, txdescriplen);
@@ -130,39 +144,36 @@ void sgdma_reset(struct altera_tse_private *priv)
        iowrite32(0, &prxsgdma->control);
 }
 
+/* For SGDMA, interrupts remain enabled after initially enabling,
+ * so no need to provide implementations for abstract enable
+ * and disable
+ */
+
 void sgdma_enable_rxirq(struct altera_tse_private *priv)
 {
-       struct sgdma_csr *csr = (struct sgdma_csr *)priv->rx_dma_csr;
-       priv->rxctrlreg |= SGDMA_CTRLREG_INTEN;
-       tse_set_bit(&csr->control, SGDMA_CTRLREG_INTEN);
 }
 
 void sgdma_enable_txirq(struct altera_tse_private *priv)
 {
-       struct sgdma_csr *csr = (struct sgdma_csr *)priv->tx_dma_csr;
-       priv->txctrlreg |= SGDMA_CTRLREG_INTEN;
-       tse_set_bit(&csr->control, SGDMA_CTRLREG_INTEN);
 }
 
-/* for SGDMA, RX interrupts remain enabled after enabling */
 void sgdma_disable_rxirq(struct altera_tse_private *priv)
 {
 }
 
-/* for SGDMA, TX interrupts remain enabled after enabling */
 void sgdma_disable_txirq(struct altera_tse_private *priv)
 {
 }
 
 void sgdma_clear_rxirq(struct altera_tse_private *priv)
 {
-       struct sgdma_csr *csr = (struct sgdma_csr *)priv->rx_dma_csr;
+       struct sgdma_csr *csr = priv->rx_dma_csr;
        tse_set_bit(&csr->control, SGDMA_CTRLREG_CLRINT);
 }
 
 void sgdma_clear_txirq(struct altera_tse_private *priv)
 {
-       struct sgdma_csr *csr = (struct sgdma_csr *)priv->tx_dma_csr;
+       struct sgdma_csr *csr = priv->tx_dma_csr;
        tse_set_bit(&csr->control, SGDMA_CTRLREG_CLRINT);
 }
 
@@ -174,8 +185,7 @@ void sgdma_clear_txirq(struct altera_tse_private *priv)
 int sgdma_tx_buffer(struct altera_tse_private *priv, struct tse_buffer *buffer)
 {
        int pktstx = 0;
-       struct sgdma_descrip *descbase =
-               (struct sgdma_descrip *)priv->tx_dma_desc;
+       struct sgdma_descrip *descbase = priv->tx_dma_desc;
 
        struct sgdma_descrip *cdesc = &descbase[0];
        struct sgdma_descrip *ndesc = &descbase[1];
@@ -184,15 +194,15 @@ int sgdma_tx_buffer(struct altera_tse_private *priv, struct tse_buffer *buffer)
        if (sgdma_txbusy(priv))
                return 0;
 
-       sgdma_descrip(cdesc,                    /* current descriptor */
-                     ndesc,                    /* next descriptor */
-                     sgdma_txphysaddr(priv, ndesc),
-                     buffer->dma_addr,         /* address of packet to xmit */
-                     0,                        /* write addr 0 for tx dma */
-                     buffer->len,              /* length of packet */
-                     SGDMA_CONTROL_EOP,        /* Generate EOP */
-                     0,                        /* read fixed */
-                     SGDMA_CONTROL_WR_FIXED);  /* Generate SOP */
+       sgdma_setup_descrip(cdesc,                      /* current descriptor */
+                           ndesc,                      /* next descriptor */
+                           sgdma_txphysaddr(priv, ndesc),
+                           buffer->dma_addr,           /* address of packet to xmit */
+                           0,                          /* write addr 0 for tx dma */
+                           buffer->len,                /* length of packet */
+                           SGDMA_CONTROL_EOP,          /* Generate EOP */
+                           0,                          /* read fixed */
+                           SGDMA_CONTROL_WR_FIXED);    /* Generate SOP */
 
        pktstx = sgdma_async_write(priv, cdesc);
 
@@ -208,7 +218,7 @@ int sgdma_tx_buffer(struct altera_tse_private *priv, struct tse_buffer *buffer)
 u32 sgdma_tx_completions(struct altera_tse_private *priv)
 {
        u32 ready = 0;
-       struct sgdma_descrip *desc = (struct sgdma_descrip *)priv->tx_dma_desc;
+       struct sgdma_descrip *desc = priv->tx_dma_desc;
 
        if (!sgdma_txbusy(priv) &&
            ((desc->control & SGDMA_CONTROL_HW_OWNED) == 0) &&
@@ -219,11 +229,15 @@ u32 sgdma_tx_completions(struct altera_tse_private *priv)
        return ready;
 }
 
-int sgdma_add_rx_desc(struct altera_tse_private *priv,
-                     struct tse_buffer *rxbuffer)
+void sgdma_start_rxdma(struct altera_tse_private *priv)
+{
+       sgdma_async_read(priv);
+}
+
+void sgdma_add_rx_desc(struct altera_tse_private *priv,
+                      struct tse_buffer *rxbuffer)
 {
        queue_rx(priv, rxbuffer);
-       return sgdma_async_read(priv);
 }
 
 /* status is returned on upper 16 bits,
@@ -231,8 +245,8 @@ int sgdma_add_rx_desc(struct altera_tse_private *priv,
  */
 u32 sgdma_rx_status(struct altera_tse_private *priv)
 {
-       struct sgdma_csr *csr = (struct sgdma_csr *)priv->rx_dma_csr;
-       struct sgdma_descrip *base = (struct sgdma_descrip *)priv->rx_dma_desc;
+       struct sgdma_csr *csr = priv->rx_dma_csr;
+       struct sgdma_descrip *base = priv->rx_dma_desc;
        struct sgdma_descrip *desc = NULL;
        int pktsrx;
        unsigned int rxstatus = 0;
@@ -240,28 +254,52 @@ u32 sgdma_rx_status(struct altera_tse_private *priv)
        unsigned int pktstatus = 0;
        struct tse_buffer *rxbuffer = NULL;
 
-       dma_sync_single_for_cpu(priv->device,
-                               priv->rxdescphys,
-                               priv->rxdescmem,
-                               DMA_BIDIRECTIONAL);
+       u32 sts = ioread32(&csr->status);
 
        desc = &base[0];
-       if ((ioread32(&csr->status) & SGDMA_STSREG_EOP) ||
-           (desc->status & SGDMA_STATUS_EOP)) {
+       if (sts & SGDMA_STSREG_EOP) {
+               dma_sync_single_for_cpu(priv->device,
+                                       priv->rxdescphys,
+                                       priv->sgdmadesclen,
+                                       DMA_FROM_DEVICE);
+
                pktlength = desc->bytes_xferred;
                pktstatus = desc->status & 0x3f;
                rxstatus = pktstatus;
                rxstatus = rxstatus << 16;
                rxstatus |= (pktlength & 0xffff);
 
-               desc->status = 0;
+               if (rxstatus) {
+                       desc->status = 0;
 
-               rxbuffer = dequeue_rx(priv);
-               if (rxbuffer == NULL)
-                       netdev_err(priv->dev,
-                                  "sgdma rx and rx queue empty!\n");
+                       rxbuffer = dequeue_rx(priv);
+                       if (rxbuffer == NULL)
+                               netdev_info(priv->dev,
+                                           "sgdma rx and rx queue empty!\n");
+
+                       /* Clear control */
+                       iowrite32(0, &csr->control);
+                       /* clear status */
+                       iowrite32(0xf, &csr->status);
 
-               /* kick the rx sgdma after reaping this descriptor */
+                       /* kick the rx sgdma after reaping this descriptor */
+                       pktsrx = sgdma_async_read(priv);
+
+               } else {
+                       /* If the SGDMA indicated an end of packet on recv,
+                        * then it's expected that the rxstatus from the
+                        * descriptor is non-zero - meaning a valid packet
+                        * with a nonzero length, or an error has been
+                        * indicated. if not, then all we can do is signal
+                        * an error and return no packet received. Most likely
+                        * there is a system design error, or an error in the
+                        * underlying kernel (cache or cache management problem)
+                        */
+                       netdev_err(priv->dev,
+                                  "SGDMA RX Error Info: %x, %x, %x\n",
+                                  sts, desc->status, rxstatus);
+               }
+       } else if (sts == 0) {
                pktsrx = sgdma_async_read(priv);
        }
 
@@ -270,15 +308,15 @@ u32 sgdma_rx_status(struct altera_tse_private *priv)
 
 
 /* Private functions */
-static void sgdma_descrip(struct sgdma_descrip *desc,
-                         struct sgdma_descrip *ndesc,
-                         dma_addr_t ndesc_phys,
-                         dma_addr_t raddr,
-                         dma_addr_t waddr,
-                         u16 length,
-                         int generate_eop,
-                         int rfixed,
-                         int wfixed)
+static void sgdma_setup_descrip(struct sgdma_descrip *desc,
+                               struct sgdma_descrip *ndesc,
+                               dma_addr_t ndesc_phys,
+                               dma_addr_t raddr,
+                               dma_addr_t waddr,
+                               u16 length,
+                               int generate_eop,
+                               int rfixed,
+                               int wfixed)
 {
        /* Clear the next descriptor as not owned by hardware */
        u32 ctrl = ndesc->control;
@@ -312,42 +350,34 @@ static void sgdma_descrip(struct sgdma_descrip *desc,
  */
 static int sgdma_async_read(struct altera_tse_private *priv)
 {
-       struct sgdma_csr *csr = (struct sgdma_csr *)priv->rx_dma_csr;
-       struct sgdma_descrip *descbase =
-               (struct sgdma_descrip *)priv->rx_dma_desc;
-
+       struct sgdma_csr *csr = priv->rx_dma_csr;
+       struct sgdma_descrip *descbase = priv->rx_dma_desc;
        struct sgdma_descrip *cdesc = &descbase[0];
        struct sgdma_descrip *ndesc = &descbase[1];
 
-       unsigned int sts = ioread32(&csr->status);
        struct tse_buffer *rxbuffer = NULL;
 
        if (!sgdma_rxbusy(priv)) {
                rxbuffer = queue_rx_peekhead(priv);
-               if (rxbuffer == NULL)
+               if (rxbuffer == NULL) {
+                       netdev_err(priv->dev, "no rx buffers available\n");
                        return 0;
-
-               sgdma_descrip(cdesc,            /* current descriptor */
-                             ndesc,            /* next descriptor */
-                             sgdma_rxphysaddr(priv, ndesc),
-                             0,                /* read addr 0 for rx dma */
-                             rxbuffer->dma_addr, /* write addr for rx dma */
-                             0,                /* read 'til EOP */
-                             0,                /* EOP: NA for rx dma */
-                             0,                /* read fixed: NA for rx dma */
-                             0);               /* SOP: NA for rx DMA */
-
-               /* clear control and status */
-               iowrite32(0, &csr->control);
-
-               /* If status available, clear those bits */
-               if (sts & 0xf)
-                       iowrite32(0xf, &csr->status);
+               }
+
+               sgdma_setup_descrip(cdesc,              /* current descriptor */
+                                   ndesc,              /* next descriptor */
+                                   sgdma_rxphysaddr(priv, ndesc),
+                                   0,                  /* read addr 0 for rx dma */
+                                   rxbuffer->dma_addr, /* write addr for rx dma */
+                                   0,                  /* read 'til EOP */
+                                   0,                  /* EOP: NA for rx dma */
+                                   0,                  /* read fixed: NA for rx dma */
+                                   0);                 /* SOP: NA for rx DMA */
 
                dma_sync_single_for_device(priv->device,
                                           priv->rxdescphys,
-                                          priv->rxdescmem,
-                                          DMA_BIDIRECTIONAL);
+                                          priv->sgdmadesclen,
+                                          DMA_TO_DEVICE);
 
                iowrite32(lower_32_bits(sgdma_rxphysaddr(priv, cdesc)),
                          &csr->next_descrip);
@@ -364,7 +394,7 @@ static int sgdma_async_read(struct altera_tse_private *priv)
 static int sgdma_async_write(struct altera_tse_private *priv,
                             struct sgdma_descrip *desc)
 {
-       struct sgdma_csr *csr = (struct sgdma_csr *)priv->tx_dma_csr;
+       struct sgdma_csr *csr = priv->tx_dma_csr;
 
        if (sgdma_txbusy(priv))
                return 0;
@@ -374,7 +404,7 @@ static int sgdma_async_write(struct altera_tse_private *priv,
        iowrite32(0x1f, &csr->status);
 
        dma_sync_single_for_device(priv->device, priv->txdescphys,
-                                  priv->txdescmem, DMA_TO_DEVICE);
+                                  priv->sgdmadesclen, DMA_TO_DEVICE);
 
        iowrite32(lower_32_bits(sgdma_txphysaddr(priv, desc)),
                  &csr->next_descrip);
@@ -485,7 +515,7 @@ queue_rx_peekhead(struct altera_tse_private *priv)
  */
 static int sgdma_rxbusy(struct altera_tse_private *priv)
 {
-       struct sgdma_csr *csr = (struct sgdma_csr *)priv->rx_dma_csr;
+       struct sgdma_csr *csr = priv->rx_dma_csr;
        return ioread32(&csr->status) & SGDMA_STSREG_BUSY;
 }
 
@@ -495,7 +525,7 @@ static int sgdma_rxbusy(struct altera_tse_private *priv)
 static int sgdma_txbusy(struct altera_tse_private *priv)
 {
        int delay = 0;
-       struct sgdma_csr *csr = (struct sgdma_csr *)priv->tx_dma_csr;
+       struct sgdma_csr *csr = priv->tx_dma_csr;
 
        /* if DMA is busy, wait for current transactino to finish */
        while ((ioread32(&csr->status) & SGDMA_STSREG_BUSY) && (delay++ < 100))
index 07d471729dc4978deee7fc592962999ec1eff1f3..584977e29ef944e1132f3ec779d7cdf1f9572160 100644 (file)
@@ -26,10 +26,11 @@ void sgdma_clear_rxirq(struct altera_tse_private *);
 void sgdma_clear_txirq(struct altera_tse_private *);
 int sgdma_tx_buffer(struct altera_tse_private *priv, struct tse_buffer *);
 u32 sgdma_tx_completions(struct altera_tse_private *);
-int sgdma_add_rx_desc(struct altera_tse_private *priv, struct tse_buffer *);
+void sgdma_add_rx_desc(struct altera_tse_private *priv, struct tse_buffer *);
 void sgdma_status(struct altera_tse_private *);
 u32 sgdma_rx_status(struct altera_tse_private *);
 int sgdma_initialize(struct altera_tse_private *);
 void sgdma_uninitialize(struct altera_tse_private *);
+void sgdma_start_rxdma(struct altera_tse_private *);
 
 #endif /*  __ALTERA_SGDMA_H__ */
index 8feeed05de0e14829718a1bec85fbb94af6699e6..465c4aabebbd49d299cb266544c3469acd0f3b6b 100644 (file)
@@ -58,6 +58,8 @@
 /* MAC function configuration default settings */
 #define ALTERA_TSE_TX_IPG_LENGTH       12
 
+#define ALTERA_TSE_PAUSE_QUANTA                0xffff
+
 #define GET_BIT_VALUE(v, bit)          (((v) >> (bit)) & 0x1)
 
 /* MAC Command_Config Register Bit Definitions
@@ -390,10 +392,11 @@ struct altera_dmaops {
        void (*clear_rxirq)(struct altera_tse_private *);
        int (*tx_buffer)(struct altera_tse_private *, struct tse_buffer *);
        u32 (*tx_completions)(struct altera_tse_private *);
-       int (*add_rx_desc)(struct altera_tse_private *, struct tse_buffer *);
+       void (*add_rx_desc)(struct altera_tse_private *, struct tse_buffer *);
        u32 (*get_rx_status)(struct altera_tse_private *);
        int (*init_dma)(struct altera_tse_private *);
        void (*uninit_dma)(struct altera_tse_private *);
+       void (*start_rxdma)(struct altera_tse_private *);
 };
 
 /* This structure is private to each device.
@@ -453,6 +456,7 @@ struct altera_tse_private {
        u32 rxctrlreg;
        dma_addr_t rxdescphys;
        dma_addr_t txdescphys;
+       size_t sgdmadesclen;
 
        struct list_head txlisthd;
        struct list_head rxlisthd;
index 319ca74f5e7480b23bc0ce63f9dca0a25ce894b2..d817e285b266712e7f59a9805b2972664e7a3987 100644 (file)
@@ -77,7 +77,7 @@ static void tse_get_drvinfo(struct net_device *dev,
        struct altera_tse_private *priv = netdev_priv(dev);
        u32 rev = ioread32(&priv->mac_dev->megacore_revision);
 
-       strcpy(info->driver, "Altera TSE MAC IP Driver");
+       strcpy(info->driver, "altera_tse");
        strcpy(info->version, "v8.0");
        snprintf(info->fw_version, ETHTOOL_FWVERS_LEN, "v%d.%d",
                 rev & 0xFFFF, (rev & 0xFFFF0000) >> 16);
@@ -185,6 +185,12 @@ static void tse_get_regs(struct net_device *dev, struct ethtool_regs *regs,
         * how to do any special formatting of this data.
         * This version number will need to change if and
         * when this register table is changed.
+        *
+        * version[31:0] = 1: Dump the first 128 TSE Registers
+        *      Upper bits are all 0 by default
+        *
+        * Upper 16-bits will indicate feature presence for
+        * Ethtool register decoding in future version.
         */
 
        regs->version = 1;
@@ -231,5 +237,5 @@ static const struct ethtool_ops tse_ethtool_ops = {
 
 void altera_tse_set_ethtool_ops(struct net_device *netdev)
 {
-       SET_ETHTOOL_OPS(netdev, &tse_ethtool_ops);
+       netdev->ethtool_ops = &tse_ethtool_ops;
 }
index c70a29e0b9f79115cc697df95b52757830651ea3..e44a4aeb970142a6622d77c52b30a27c58a6e786 100644 (file)
@@ -224,6 +224,7 @@ static int tse_init_rx_buffer(struct altera_tse_private *priv,
                dev_kfree_skb_any(rxbuffer->skb);
                return -EINVAL;
        }
+       rxbuffer->dma_addr &= (dma_addr_t)~3;
        rxbuffer->len = len;
        return 0;
 }
@@ -425,9 +426,10 @@ static int tse_rx(struct altera_tse_private *priv, int limit)
                priv->dev->stats.rx_bytes += pktlength;
 
                entry = next_entry;
+
+               tse_rx_refill(priv);
        }
 
-       tse_rx_refill(priv);
        return count;
 }
 
@@ -520,7 +522,6 @@ static irqreturn_t altera_isr(int irq, void *dev_id)
        struct altera_tse_private *priv;
        unsigned long int flags;
 
-
        if (unlikely(!dev)) {
                pr_err("%s: invalid dev pointer\n", __func__);
                return IRQ_NONE;
@@ -868,13 +869,13 @@ static int init_mac(struct altera_tse_private *priv)
        /* Disable RX/TX shift 16 for alignment of all received frames on 16-bit
         * start address
         */
-       tse_clear_bit(&mac->rx_cmd_stat, ALTERA_TSE_RX_CMD_STAT_RX_SHIFT16);
+       tse_set_bit(&mac->rx_cmd_stat, ALTERA_TSE_RX_CMD_STAT_RX_SHIFT16);
        tse_clear_bit(&mac->tx_cmd_stat, ALTERA_TSE_TX_CMD_STAT_TX_SHIFT16 |
                                         ALTERA_TSE_TX_CMD_STAT_OMIT_CRC);
 
        /* Set the MAC options */
        cmd = ioread32(&mac->command_config);
-       cmd |= MAC_CMDCFG_PAD_EN;       /* Padding Removal on Receive */
+       cmd &= ~MAC_CMDCFG_PAD_EN;      /* No padding Removal on Receive */
        cmd &= ~MAC_CMDCFG_CRC_FWD;     /* CRC Removal */
        cmd |= MAC_CMDCFG_RX_ERR_DISC;  /* Automatically discard frames
                                         * with CRC errors
@@ -882,8 +883,16 @@ static int init_mac(struct altera_tse_private *priv)
        cmd |= MAC_CMDCFG_CNTL_FRM_ENA;
        cmd &= ~MAC_CMDCFG_TX_ENA;
        cmd &= ~MAC_CMDCFG_RX_ENA;
+
+       /* Default speed and duplex setting, full/100 */
+       cmd &= ~MAC_CMDCFG_HD_ENA;
+       cmd &= ~MAC_CMDCFG_ETH_SPEED;
+       cmd &= ~MAC_CMDCFG_ENA_10;
+
        iowrite32(cmd, &mac->command_config);
 
+       iowrite32(ALTERA_TSE_PAUSE_QUANTA, &mac->pause_quanta);
+
        if (netif_msg_hw(priv))
                dev_dbg(priv->device,
                        "MAC post-initialization: CMD_CONFIG = 0x%08x\n", cmd);
@@ -1085,17 +1094,19 @@ static int tse_open(struct net_device *dev)
 
        spin_unlock_irqrestore(&priv->rxdma_irq_lock, flags);
 
-       /* Start MAC Rx/Tx */
-       spin_lock(&priv->mac_cfg_lock);
-       tse_set_mac(priv, true);
-       spin_unlock(&priv->mac_cfg_lock);
-
        if (priv->phydev)
                phy_start(priv->phydev);
 
        napi_enable(&priv->napi);
        netif_start_queue(dev);
 
+       priv->dmaops->start_rxdma(priv);
+
+       /* Start MAC Rx/Tx */
+       spin_lock(&priv->mac_cfg_lock);
+       tse_set_mac(priv, true);
+       spin_unlock(&priv->mac_cfg_lock);
+
        return 0;
 
 tx_request_irq_error:
@@ -1167,7 +1178,6 @@ static struct net_device_ops altera_tse_netdev_ops = {
        .ndo_validate_addr      = eth_validate_addr,
 };
 
-
 static int request_and_map(struct platform_device *pdev, const char *name,
                           struct resource **res, void __iomem **ptr)
 {
@@ -1235,7 +1245,7 @@ static int altera_tse_probe(struct platform_device *pdev)
                /* Get the mapped address to the SGDMA descriptor memory */
                ret = request_and_map(pdev, "s1", &dma_res, &descmap);
                if (ret)
-                       goto out_free;
+                       goto err_free_netdev;
 
                /* Start of that memory is for transmit descriptors */
                priv->tx_dma_desc = descmap;
@@ -1254,24 +1264,24 @@ static int altera_tse_probe(struct platform_device *pdev)
                if (upper_32_bits(priv->rxdescmem_busaddr)) {
                        dev_dbg(priv->device,
                                "SGDMA bus addresses greater than 32-bits\n");
-                       goto out_free;
+                       goto err_free_netdev;
                }
                if (upper_32_bits(priv->txdescmem_busaddr)) {
                        dev_dbg(priv->device,
                                "SGDMA bus addresses greater than 32-bits\n");
-                       goto out_free;
+                       goto err_free_netdev;
                }
        } else if (priv->dmaops &&
                   priv->dmaops->altera_dtype == ALTERA_DTYPE_MSGDMA) {
                ret = request_and_map(pdev, "rx_resp", &dma_res,
                                      &priv->rx_dma_resp);
                if (ret)
-                       goto out_free;
+                       goto err_free_netdev;
 
                ret = request_and_map(pdev, "tx_desc", &dma_res,
                                      &priv->tx_dma_desc);
                if (ret)
-                       goto out_free;
+                       goto err_free_netdev;
 
                priv->txdescmem = resource_size(dma_res);
                priv->txdescmem_busaddr = dma_res->start;
@@ -1279,13 +1289,13 @@ static int altera_tse_probe(struct platform_device *pdev)
                ret = request_and_map(pdev, "rx_desc", &dma_res,
                                      &priv->rx_dma_desc);
                if (ret)
-                       goto out_free;
+                       goto err_free_netdev;
 
                priv->rxdescmem = resource_size(dma_res);
                priv->rxdescmem_busaddr = dma_res->start;
 
        } else {
-               goto out_free;
+               goto err_free_netdev;
        }
 
        if (!dma_set_mask(priv->device, DMA_BIT_MASK(priv->dmaops->dmamask)))
@@ -1294,26 +1304,26 @@ static int altera_tse_probe(struct platform_device *pdev)
        else if (!dma_set_mask(priv->device, DMA_BIT_MASK(32)))
                dma_set_coherent_mask(priv->device, DMA_BIT_MASK(32));
        else
-               goto out_free;
+               goto err_free_netdev;
 
        /* MAC address space */
        ret = request_and_map(pdev, "control_port", &control_port,
                              (void __iomem **)&priv->mac_dev);
        if (ret)
-               goto out_free;
+               goto err_free_netdev;
 
        /* xSGDMA Rx Dispatcher address space */
        ret = request_and_map(pdev, "rx_csr", &dma_res,
                              &priv->rx_dma_csr);
        if (ret)
-               goto out_free;
+               goto err_free_netdev;
 
 
        /* xSGDMA Tx Dispatcher address space */
        ret = request_and_map(pdev, "tx_csr", &dma_res,
                              &priv->tx_dma_csr);
        if (ret)
-               goto out_free;
+               goto err_free_netdev;
 
 
        /* Rx IRQ */
@@ -1321,7 +1331,7 @@ static int altera_tse_probe(struct platform_device *pdev)
        if (priv->rx_irq == -ENXIO) {
                dev_err(&pdev->dev, "cannot obtain Rx IRQ\n");
                ret = -ENXIO;
-               goto out_free;
+               goto err_free_netdev;
        }
 
        /* Tx IRQ */
@@ -1329,7 +1339,7 @@ static int altera_tse_probe(struct platform_device *pdev)
        if (priv->tx_irq == -ENXIO) {
                dev_err(&pdev->dev, "cannot obtain Tx IRQ\n");
                ret = -ENXIO;
-               goto out_free;
+               goto err_free_netdev;
        }
 
        /* get FIFO depths from device tree */
@@ -1337,14 +1347,14 @@ static int altera_tse_probe(struct platform_device *pdev)
                                 &priv->rx_fifo_depth)) {
                dev_err(&pdev->dev, "cannot obtain rx-fifo-depth\n");
                ret = -ENXIO;
-               goto out_free;
+               goto err_free_netdev;
        }
 
        if (of_property_read_u32(pdev->dev.of_node, "tx-fifo-depth",
                                 &priv->rx_fifo_depth)) {
                dev_err(&pdev->dev, "cannot obtain tx-fifo-depth\n");
                ret = -ENXIO;
-               goto out_free;
+               goto err_free_netdev;
        }
 
        /* get hash filter settings for this instance */
@@ -1393,7 +1403,7 @@ static int altera_tse_probe(struct platform_device *pdev)
              ((priv->phy_addr >= 0) && (priv->phy_addr < PHY_MAX_ADDR)))) {
                dev_err(&pdev->dev, "invalid phy-addr specified %d\n",
                        priv->phy_addr);
-               goto out_free;
+               goto err_free_netdev;
        }
 
        /* Create/attach to MDIO bus */
@@ -1401,7 +1411,7 @@ static int altera_tse_probe(struct platform_device *pdev)
                                     atomic_add_return(1, &instance_count));
 
        if (ret)
-               goto out_free;
+               goto err_free_netdev;
 
        /* initialize netdev */
        ether_setup(ndev);
@@ -1438,7 +1448,7 @@ static int altera_tse_probe(struct platform_device *pdev)
        ret = register_netdev(ndev);
        if (ret) {
                dev_err(&pdev->dev, "failed to register TSE net device\n");
-               goto out_free_mdio;
+               goto err_register_netdev;
        }
 
        platform_set_drvdata(pdev, ndev);
@@ -1455,13 +1465,16 @@ static int altera_tse_probe(struct platform_device *pdev)
        ret = init_phy(ndev);
        if (ret != 0) {
                netdev_err(ndev, "Cannot attach to PHY (error: %d)\n", ret);
-               goto out_free_mdio;
+               goto err_init_phy;
        }
        return 0;
 
-out_free_mdio:
+err_init_phy:
+       unregister_netdev(ndev);
+err_register_netdev:
+       netif_napi_del(&priv->napi);
        altera_tse_mdio_destroy(ndev);
-out_free:
+err_free_netdev:
        free_netdev(ndev);
        return ret;
 }
@@ -1496,6 +1509,7 @@ struct altera_dmaops altera_dtype_sgdma = {
        .get_rx_status = sgdma_rx_status,
        .init_dma = sgdma_initialize,
        .uninit_dma = sgdma_uninitialize,
+       .start_rxdma = sgdma_start_rxdma,
 };
 
 struct altera_dmaops altera_dtype_msgdma = {
@@ -1514,6 +1528,7 @@ struct altera_dmaops altera_dtype_msgdma = {
        .get_rx_status = msgdma_rx_status,
        .init_dma = msgdma_initialize,
        .uninit_dma = msgdma_uninitialize,
+       .start_rxdma = msgdma_start_rxdma,
 };
 
 static struct of_device_id altera_tse_ids[] = {
index 26efaaa5e73fd292de512fc428e9af84126fcbb9..068dc7cad5fa3c511c34b034c006add6284bb097 100644 (file)
@@ -1900,7 +1900,7 @@ static int amd8111e_probe_one(struct pci_dev *pdev,
 
        /* Initialize driver entry points */
        dev->netdev_ops = &amd8111e_netdev_ops;
-       SET_ETHTOOL_OPS(dev, &ops);
+       dev->ethtool_ops = &ops;
        dev->irq =pdev->irq;
        dev->watchdog_timeo = AMD8111E_TX_TIMEOUT;
        netif_napi_add(dev, &lp->napi, amd8111e_rx_poll, 32);
index b08101b31b8bc547ffbfb486f87abc7a18f92686..968b7bfac8fcaa9af767c57c474a83b3229b09a0 100644 (file)
@@ -718,7 +718,6 @@ static int ariadne_init_one(struct zorro_dev *z,
        unsigned long mem_start = board + ARIADNE_RAM;
        struct resource *r1, *r2;
        struct net_device *dev;
-       struct ariadne_private *priv;
        u32 serial;
        int err;
 
@@ -738,8 +737,6 @@ static int ariadne_init_one(struct zorro_dev *z,
                return -ENOMEM;
        }
 
-       priv = netdev_priv(dev);
-
        r1->name = dev->name;
        r2->name = dev->name;
 
index a2bd91e3d302acce0d727cc3ef1eda27e56040cb..a78e4c13695980e295ead2c8d85fd6f6a351f5c8 100644 (file)
@@ -1229,7 +1229,7 @@ static int au1000_probe(struct platform_device *pdev)
        dev->base_addr = base->start;
        dev->irq = irq;
        dev->netdev_ops = &au1000_netdev_ops;
-       SET_ETHTOOL_OPS(dev, &au1000_ethtool_ops);
+       dev->ethtool_ops = &au1000_ethtool_ops;
        dev->watchdog_timeo = ETH_TX_TIMEOUT;
 
        /*
index 08569fe2b182c2bef930ef582320b9ccb9cfc952..abf3b1581c82a2eacbf7074b3c9db9ca12fbef19 100644 (file)
@@ -457,7 +457,7 @@ static int nmclan_probe(struct pcmcia_device *link)
     lp->tx_free_frames=AM2150_MAX_TX_FRAMES;
 
     dev->netdev_ops = &mace_netdev_ops;
-    SET_ETHTOOL_OPS(dev, &netdev_ethtool_ops);
+    dev->ethtool_ops = &netdev_ethtool_ops;
     dev->watchdog_timeo = TX_TIMEOUT;
 
     return nmclan_config(link);
index 928fac6dd10a90dca66244dba99194c9c9e2924d..53f85bf715268db94d864695321e3890454719ce 100644 (file)
@@ -11,6 +11,7 @@
 #include <linux/dma-mapping.h>
 #include <linux/netdevice.h>
 #include <linux/phy.h>
+#include <linux/clk.h>
 
 /* STATUS and ENABLE Register bit masks */
 #define TXINT_MASK     (1<<0)  /* Transmit interrupt */
@@ -131,6 +132,7 @@ struct arc_emac_priv {
        struct mii_bus *bus;
 
        void __iomem *regs;
+       struct clk *clk;
 
        struct napi_struct napi;
        struct net_device_stats stats;
index eeecc29cf5b7d2695739d819fe900e798bf38996..18e2faccebb0dcb98bc19bdc333561776aec6b95 100644 (file)
@@ -13,6 +13,7 @@
  *             Vineet Gupta
  */
 
+#include <linux/crc32.h>
 #include <linux/etherdevice.h>
 #include <linux/interrupt.h>
 #include <linux/io.h>
@@ -362,6 +363,15 @@ static irqreturn_t arc_emac_intr(int irq, void *dev_instance)
        return IRQ_HANDLED;
 }
 
+#ifdef CONFIG_NET_POLL_CONTROLLER
+static void arc_emac_poll_controller(struct net_device *dev)
+{
+       disable_irq(dev->irq);
+       arc_emac_intr(dev->irq, dev);
+       enable_irq(dev->irq);
+}
+#endif
+
 /**
  * arc_emac_open - Open the network device.
  * @ndev:      Pointer to the network device.
@@ -450,6 +460,41 @@ static int arc_emac_open(struct net_device *ndev)
        return 0;
 }
 
+/**
+ * arc_emac_set_rx_mode - Change the receive filtering mode.
+ * @ndev:      Pointer to the network device.
+ *
+ * This function enables/disables promiscuous or all-multicast mode
+ * and updates the multicast filtering list of the network device.
+ */
+static void arc_emac_set_rx_mode(struct net_device *ndev)
+{
+       struct arc_emac_priv *priv = netdev_priv(ndev);
+
+       if (ndev->flags & IFF_PROMISC) {
+               arc_reg_or(priv, R_CTRL, PROM_MASK);
+       } else {
+               arc_reg_clr(priv, R_CTRL, PROM_MASK);
+
+               if (ndev->flags & IFF_ALLMULTI) {
+                       arc_reg_set(priv, R_LAFL, ~0);
+                       arc_reg_set(priv, R_LAFH, ~0);
+               } else {
+                       struct netdev_hw_addr *ha;
+                       unsigned int filter[2] = { 0, 0 };
+                       int bit;
+
+                       netdev_for_each_mc_addr(ha, ndev) {
+                               bit = ether_crc_le(ETH_ALEN, ha->addr) >> 26;
+                               filter[bit >> 5] |= 1 << (bit & 31);
+                       }
+
+                       arc_reg_set(priv, R_LAFL, filter[0]);
+                       arc_reg_set(priv, R_LAFH, filter[1]);
+               }
+       }
+}
+
 /**
  * arc_emac_stop - Close the network device.
  * @ndev:      Pointer to the network device.
@@ -574,6 +619,18 @@ static int arc_emac_tx(struct sk_buff *skb, struct net_device *ndev)
        return NETDEV_TX_OK;
 }
 
+static void arc_emac_set_address_internal(struct net_device *ndev)
+{
+       struct arc_emac_priv *priv = netdev_priv(ndev);
+       unsigned int addr_low, addr_hi;
+
+       addr_low = le32_to_cpu(*(__le32 *) &ndev->dev_addr[0]);
+       addr_hi = le16_to_cpu(*(__le16 *) &ndev->dev_addr[4]);
+
+       arc_reg_set(priv, R_ADDRL, addr_low);
+       arc_reg_set(priv, R_ADDRH, addr_hi);
+}
+
 /**
  * arc_emac_set_address - Set the MAC address for this device.
  * @ndev:      Pointer to net_device structure.
@@ -587,9 +644,7 @@ static int arc_emac_tx(struct sk_buff *skb, struct net_device *ndev)
  */
 static int arc_emac_set_address(struct net_device *ndev, void *p)
 {
-       struct arc_emac_priv *priv = netdev_priv(ndev);
        struct sockaddr *addr = p;
-       unsigned int addr_low, addr_hi;
 
        if (netif_running(ndev))
                return -EBUSY;
@@ -599,11 +654,7 @@ static int arc_emac_set_address(struct net_device *ndev, void *p)
 
        memcpy(ndev->dev_addr, addr->sa_data, ndev->addr_len);
 
-       addr_low = le32_to_cpu(*(__le32 *) &ndev->dev_addr[0]);
-       addr_hi = le16_to_cpu(*(__le16 *) &ndev->dev_addr[4]);
-
-       arc_reg_set(priv, R_ADDRL, addr_low);
-       arc_reg_set(priv, R_ADDRH, addr_hi);
+       arc_emac_set_address_internal(ndev);
 
        return 0;
 }
@@ -614,6 +665,10 @@ static const struct net_device_ops arc_emac_netdev_ops = {
        .ndo_start_xmit         = arc_emac_tx,
        .ndo_set_mac_address    = arc_emac_set_address,
        .ndo_get_stats          = arc_emac_stats,
+       .ndo_set_rx_mode        = arc_emac_set_rx_mode,
+#ifdef CONFIG_NET_POLL_CONTROLLER
+       .ndo_poll_controller    = arc_emac_poll_controller,
+#endif
 };
 
 static int arc_emac_probe(struct platform_device *pdev)
@@ -643,13 +698,6 @@ static int arc_emac_probe(struct platform_device *pdev)
                return -ENODEV;
        }
 
-       /* Get CPU clock frequency from device tree */
-       if (of_property_read_u32(pdev->dev.of_node, "clock-frequency",
-                                &clock_frequency)) {
-               dev_err(&pdev->dev, "failed to retrieve <clock-frequency> from device tree\n");
-               return -EINVAL;
-       }
-
        /* Get IRQ from device tree */
        irq = irq_of_parse_and_map(pdev->dev.of_node, 0);
        if (!irq) {
@@ -677,17 +725,36 @@ static int arc_emac_probe(struct platform_device *pdev)
        priv->regs = devm_ioremap_resource(&pdev->dev, &res_regs);
        if (IS_ERR(priv->regs)) {
                err = PTR_ERR(priv->regs);
-               goto out;
+               goto out_netdev;
        }
        dev_dbg(&pdev->dev, "Registers base address is 0x%p\n", priv->regs);
 
+       priv->clk = of_clk_get(pdev->dev.of_node, 0);
+       if (IS_ERR(priv->clk)) {
+               /* Get CPU clock frequency from device tree */
+               if (of_property_read_u32(pdev->dev.of_node, "clock-frequency",
+                                       &clock_frequency)) {
+                       dev_err(&pdev->dev, "failed to retrieve <clock-frequency> from device tree\n");
+                       err = -EINVAL;
+                       goto out_netdev;
+               }
+       } else {
+               err = clk_prepare_enable(priv->clk);
+               if (err) {
+                       dev_err(&pdev->dev, "failed to enable clock\n");
+                       goto out_clkget;
+               }
+
+               clock_frequency = clk_get_rate(priv->clk);
+       }
+
        id = arc_reg_get(priv, R_ID);
 
        /* Check for EMAC revision 5 or 7, magic number */
        if (!(id == 0x0005fd02 || id == 0x0007fd02)) {
                dev_err(&pdev->dev, "ARC EMAC not detected, id=0x%x\n", id);
                err = -ENODEV;
-               goto out;
+               goto out_clken;
        }
        dev_info(&pdev->dev, "ARC EMAC detected with id: 0x%x\n", id);
 
@@ -702,7 +769,7 @@ static int arc_emac_probe(struct platform_device *pdev)
                               ndev->name, ndev);
        if (err) {
                dev_err(&pdev->dev, "could not allocate IRQ\n");
-               goto out;
+               goto out_clken;
        }
 
        /* Get MAC address from device tree */
@@ -713,6 +780,7 @@ static int arc_emac_probe(struct platform_device *pdev)
        else
                eth_hw_addr_random(ndev);
 
+       arc_emac_set_address_internal(ndev);
        dev_info(&pdev->dev, "MAC address is now %pM\n", ndev->dev_addr);
 
        /* Do 1 allocation instead of 2 separate ones for Rx and Tx BD rings */
@@ -722,7 +790,7 @@ static int arc_emac_probe(struct platform_device *pdev)
        if (!priv->rxbd) {
                dev_err(&pdev->dev, "failed to allocate data buffers\n");
                err = -ENOMEM;
-               goto out;
+               goto out_clken;
        }
 
        priv->txbd = priv->rxbd + RX_BD_NUM;
@@ -734,7 +802,7 @@ static int arc_emac_probe(struct platform_device *pdev)
        err = arc_mdio_probe(pdev, priv);
        if (err) {
                dev_err(&pdev->dev, "failed to probe MII bus\n");
-               goto out;
+               goto out_clken;
        }
 
        priv->phy_dev = of_phy_connect(ndev, phy_node, arc_emac_adjust_link, 0,
@@ -742,7 +810,7 @@ static int arc_emac_probe(struct platform_device *pdev)
        if (!priv->phy_dev) {
                dev_err(&pdev->dev, "of_phy_connect() failed\n");
                err = -ENODEV;
-               goto out;
+               goto out_mdio;
        }
 
        dev_info(&pdev->dev, "connected to %s phy with id 0x%x\n",
@@ -752,14 +820,25 @@ static int arc_emac_probe(struct platform_device *pdev)
 
        err = register_netdev(ndev);
        if (err) {
-               netif_napi_del(&priv->napi);
                dev_err(&pdev->dev, "failed to register network device\n");
-               goto out;
+               goto out_netif_api;
        }
 
        return 0;
 
-out:
+out_netif_api:
+       netif_napi_del(&priv->napi);
+       phy_disconnect(priv->phy_dev);
+       priv->phy_dev = NULL;
+out_mdio:
+       arc_mdio_remove(priv);
+out_clken:
+       if (!IS_ERR(priv->clk))
+               clk_disable_unprepare(priv->clk);
+out_clkget:
+       if (!IS_ERR(priv->clk))
+               clk_put(priv->clk);
+out_netdev:
        free_netdev(ndev);
        return err;
 }
@@ -774,6 +853,12 @@ static int arc_emac_remove(struct platform_device *pdev)
        arc_mdio_remove(priv);
        unregister_netdev(ndev);
        netif_napi_del(&priv->napi);
+
+       if (!IS_ERR(priv->clk)) {
+               clk_disable_unprepare(priv->clk);
+               clk_put(priv->clk);
+       }
+
        free_netdev(ndev);
 
        return 0;
index 17bb9ce96260df20eba44a9f215778a62c28373e..49faa97a30c364185f7e988a7e64be5529a62382 100644 (file)
@@ -1302,7 +1302,7 @@ static int alx_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
        }
 
        netdev->netdev_ops = &alx_netdev_ops;
-       SET_ETHTOOL_OPS(netdev, &alx_ethtool_ops);
+       netdev->ethtool_ops = &alx_ethtool_ops;
        netdev->irq = pdev->irq;
        netdev->watchdog_timeo = ALX_WATCHDOG_TIME;
 
index 859ea844ba0ff7c292994a59446951680a2e7b79..ecacaaeb2b92929caeeb12849b06c3d6b53707c9 100644 (file)
@@ -305,5 +305,5 @@ static const struct ethtool_ops atl1c_ethtool_ops = {
 
 void atl1c_set_ethtool_ops(struct net_device *netdev)
 {
-       SET_ETHTOOL_OPS(netdev, &atl1c_ethtool_ops);
+       netdev->ethtool_ops = &atl1c_ethtool_ops;
 }
index 82b23861bf5598698f3a21232e37eaf693f83c6e..206e9b7be43123911b80ab1d84478bd4412079fe 100644 (file)
@@ -388,5 +388,5 @@ static const struct ethtool_ops atl1e_ethtool_ops = {
 
 void atl1e_set_ethtool_ops(struct net_device *netdev)
 {
-       SET_ETHTOOL_OPS(netdev, &atl1e_ethtool_ops);
+       netdev->ethtool_ops = &atl1e_ethtool_ops;
 }
index 78befb522a528268fae32c68649ead0a01263366..2587fed7b02cbc3e93674ec73e053b0f50c28c46 100644 (file)
@@ -1396,7 +1396,7 @@ static int atl2_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
        atl2_setup_pcicmd(pdev);
 
        netdev->netdev_ops = &atl2_netdev_ops;
-       SET_ETHTOOL_OPS(netdev, &atl2_ethtool_ops);
+       netdev->ethtool_ops = &atl2_ethtool_ops;
        netdev->watchdog_timeo = 5 * HZ;
        strncpy(netdev->name, pci_name(pdev), sizeof(netdev->name) - 1);
 
index 85dbddd03722b20a861f53cba7fe00b6cb66f3db..3e488094b0731811459c66dcb0517d00cb7dfbbe 100644 (file)
@@ -150,4 +150,15 @@ config BGMAC
          In case of using this driver on BCM4706 it's also requires to enable
          BCMA_DRIVER_GMAC_CMN to make it work.
 
+config SYSTEMPORT
+       tristate "Broadcom SYSTEMPORT internal MAC support"
+       depends on OF
+       select MII
+       select PHYLIB
+       select FIXED_PHY if SYSTEMPORT=y
+       help
+         This driver supports the built-in Ethernet MACs found in the
+         Broadcom BCM7xxx Set Top Box family chipset using an internal
+         Ethernet switch.
+
 endif # NET_VENDOR_BROADCOM
index fd639a0d4c7d64b2b7db5eb084087502e3c6d63a..e2a958a657e0bb8f816d205e0792d3fdfbfc70a4 100644 (file)
@@ -11,3 +11,4 @@ obj-$(CONFIG_BNX2X) += bnx2x/
 obj-$(CONFIG_SB1250_MAC) += sb1250-mac.o
 obj-$(CONFIG_TIGON3) += tg3.o
 obj-$(CONFIG_BGMAC) += bgmac.o
+obj-$(CONFIG_SYSTEMPORT) += bcmsysport.o
index 05ba6258901794ab51842ddc8d630f4d1286798d..ca5a20a48b14cdb48ccd75aaa1c34715a8dcfee3 100644 (file)
@@ -2380,7 +2380,7 @@ static int b44_init_one(struct ssb_device *sdev,
        netif_napi_add(dev, &bp->napi, b44_poll, 64);
        dev->watchdog_timeo = B44_TX_TIMEOUT;
        dev->irq = sdev->irq;
-       SET_ETHTOOL_OPS(dev, &b44_ethtool_ops);
+       dev->ethtool_ops = &b44_ethtool_ops;
 
        err = ssb_bus_powerup(sdev->bus, 0);
        if (err) {
index a7d11f5565d69342ad296471a9a5d44f8d7c51d5..3e8d1a88ed3d7b597298100798e5286449ffafe8 100644 (file)
@@ -1315,8 +1315,7 @@ static const struct bcm_enet_stats bcm_enet_gstrings_stats[] = {
 
 };
 
-#define BCM_ENET_STATS_LEN     \
-       (sizeof(bcm_enet_gstrings_stats) / sizeof(struct bcm_enet_stats))
+#define BCM_ENET_STATS_LEN     ARRAY_SIZE(bcm_enet_gstrings_stats)
 
 static const u32 unused_mib_regs[] = {
        ETH_MIB_TX_ALL_OCTETS,
@@ -1898,7 +1897,7 @@ static int bcm_enet_probe(struct platform_device *pdev)
        dev->netdev_ops = &bcm_enet_ops;
        netif_napi_add(dev, &priv->napi, bcm_enet_poll, 16);
 
-       SET_ETHTOOL_OPS(dev, &bcm_enet_ethtool_ops);
+       dev->ethtool_ops = &bcm_enet_ethtool_ops;
        SET_NETDEV_DEV(dev, &pdev->dev);
 
        ret = register_netdev(dev);
@@ -2784,7 +2783,7 @@ static int bcm_enetsw_probe(struct platform_device *pdev)
        /* register netdevice */
        dev->netdev_ops = &bcm_enetsw_ops;
        netif_napi_add(dev, &priv->napi, bcm_enet_poll, 16);
-       SET_ETHTOOL_OPS(dev, &bcm_enetsw_ethtool_ops);
+       dev->ethtool_ops = &bcm_enetsw_ethtool_ops;
        SET_NETDEV_DEV(dev, &pdev->dev);
 
        spin_lock_init(&priv->enetsw_mdio_lock);
diff --git a/drivers/net/ethernet/broadcom/bcmsysport.c b/drivers/net/ethernet/broadcom/bcmsysport.c
new file mode 100644 (file)
index 0000000..dc708a8
--- /dev/null
@@ -0,0 +1,1649 @@
+/*
+ * Broadcom BCM7xxx System Port Ethernet MAC driver
+ *
+ * Copyright (C) 2014 Broadcom Corporation
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#define pr_fmt(fmt)    KBUILD_MODNAME ": " fmt
+
+#include <linux/init.h>
+#include <linux/interrupt.h>
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/netdevice.h>
+#include <linux/etherdevice.h>
+#include <linux/platform_device.h>
+#include <linux/of.h>
+#include <linux/of_net.h>
+#include <linux/of_mdio.h>
+#include <linux/phy.h>
+#include <linux/phy_fixed.h>
+#include <net/ip.h>
+#include <net/ipv6.h>
+
+#include "bcmsysport.h"
+
+/* I/O accessors register helpers */
+#define BCM_SYSPORT_IO_MACRO(name, offset) \
+static inline u32 name##_readl(struct bcm_sysport_priv *priv, u32 off) \
+{                                                                      \
+       u32 reg = __raw_readl(priv->base + offset + off);               \
+       return reg;                                                     \
+}                                                                      \
+static inline void name##_writel(struct bcm_sysport_priv *priv,                \
+                                 u32 val, u32 off)                     \
+{                                                                      \
+       __raw_writel(val, priv->base + offset + off);                   \
+}                                                                      \
+
+BCM_SYSPORT_IO_MACRO(intrl2_0, SYS_PORT_INTRL2_0_OFFSET);
+BCM_SYSPORT_IO_MACRO(intrl2_1, SYS_PORT_INTRL2_1_OFFSET);
+BCM_SYSPORT_IO_MACRO(umac, SYS_PORT_UMAC_OFFSET);
+BCM_SYSPORT_IO_MACRO(tdma, SYS_PORT_TDMA_OFFSET);
+BCM_SYSPORT_IO_MACRO(rdma, SYS_PORT_RDMA_OFFSET);
+BCM_SYSPORT_IO_MACRO(rxchk, SYS_PORT_RXCHK_OFFSET);
+BCM_SYSPORT_IO_MACRO(txchk, SYS_PORT_TXCHK_OFFSET);
+BCM_SYSPORT_IO_MACRO(rbuf, SYS_PORT_RBUF_OFFSET);
+BCM_SYSPORT_IO_MACRO(tbuf, SYS_PORT_TBUF_OFFSET);
+BCM_SYSPORT_IO_MACRO(topctrl, SYS_PORT_TOPCTRL_OFFSET);
+
+/* L2-interrupt masking/unmasking helpers, does automatic saving of the applied
+ * mask in a software copy to avoid CPU_MASK_STATUS reads in hot-paths.
+  */
+#define BCM_SYSPORT_INTR_L2(which)     \
+static inline void intrl2_##which##_mask_clear(struct bcm_sysport_priv *priv, \
+                                               u32 mask)               \
+{                                                                      \
+       intrl2_##which##_writel(priv, mask, INTRL2_CPU_MASK_CLEAR);     \
+       priv->irq##which##_mask &= ~(mask);                             \
+}                                                                      \
+static inline void intrl2_##which##_mask_set(struct bcm_sysport_priv *priv, \
+                                               u32 mask)               \
+{                                                                      \
+       intrl2_## which##_writel(priv, mask, INTRL2_CPU_MASK_SET);      \
+       priv->irq##which##_mask |= (mask);                              \
+}                                                                      \
+
+BCM_SYSPORT_INTR_L2(0)
+BCM_SYSPORT_INTR_L2(1)
+
+/* Register accesses to GISB/RBUS registers are expensive (few hundred
+ * nanoseconds), so keep the check for 64-bits explicit here to save
+ * one register write per-packet on 32-bits platforms.
+ */
+static inline void dma_desc_set_addr(struct bcm_sysport_priv *priv,
+                                    void __iomem *d,
+                                    dma_addr_t addr)
+{
+#ifdef CONFIG_PHYS_ADDR_T_64BIT
+       __raw_writel(upper_32_bits(addr) & DESC_ADDR_HI_MASK,
+                       d + DESC_ADDR_HI_STATUS_LEN);
+#endif
+       __raw_writel(lower_32_bits(addr), d + DESC_ADDR_LO);
+}
+
+static inline void tdma_port_write_desc_addr(struct bcm_sysport_priv *priv,
+                                               struct dma_desc *desc,
+                                               unsigned int port)
+{
+       /* Ports are latched, so write upper address first */
+       tdma_writel(priv, desc->addr_status_len, TDMA_WRITE_PORT_HI(port));
+       tdma_writel(priv, desc->addr_lo, TDMA_WRITE_PORT_LO(port));
+}
+
+/* Ethtool operations */
+static int bcm_sysport_set_settings(struct net_device *dev,
+                                   struct ethtool_cmd *cmd)
+{
+       struct bcm_sysport_priv *priv = netdev_priv(dev);
+
+       if (!netif_running(dev))
+               return -EINVAL;
+
+       return phy_ethtool_sset(priv->phydev, cmd);
+}
+
+static int bcm_sysport_get_settings(struct net_device *dev,
+                                       struct ethtool_cmd *cmd)
+{
+       struct bcm_sysport_priv *priv = netdev_priv(dev);
+
+       if (!netif_running(dev))
+               return -EINVAL;
+
+       return phy_ethtool_gset(priv->phydev, cmd);
+}
+
+static int bcm_sysport_set_rx_csum(struct net_device *dev,
+                                       netdev_features_t wanted)
+{
+       struct bcm_sysport_priv *priv = netdev_priv(dev);
+       u32 reg;
+
+       priv->rx_csum_en = !!(wanted & NETIF_F_RXCSUM);
+       reg = rxchk_readl(priv, RXCHK_CONTROL);
+       if (priv->rx_csum_en)
+               reg |= RXCHK_EN;
+       else
+               reg &= ~RXCHK_EN;
+
+       /* If UniMAC forwards CRC, we need to skip over it to get
+        * a valid CHK bit to be set in the per-packet status word
+        */
+       if (priv->rx_csum_en && priv->crc_fwd)
+               reg |= RXCHK_SKIP_FCS;
+       else
+               reg &= ~RXCHK_SKIP_FCS;
+
+       rxchk_writel(priv, reg, RXCHK_CONTROL);
+
+       return 0;
+}
+
+static int bcm_sysport_set_tx_csum(struct net_device *dev,
+                                       netdev_features_t wanted)
+{
+       struct bcm_sysport_priv *priv = netdev_priv(dev);
+       u32 reg;
+
+       /* Hardware transmit checksum requires us to enable the Transmit status
+        * block prepended to the packet contents
+        */
+       priv->tsb_en = !!(wanted & (NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM));
+       reg = tdma_readl(priv, TDMA_CONTROL);
+       if (priv->tsb_en)
+               reg |= TSB_EN;
+       else
+               reg &= ~TSB_EN;
+       tdma_writel(priv, reg, TDMA_CONTROL);
+
+       return 0;
+}
+
+static int bcm_sysport_set_features(struct net_device *dev,
+                                       netdev_features_t features)
+{
+       netdev_features_t changed = features ^ dev->features;
+       netdev_features_t wanted = dev->wanted_features;
+       int ret = 0;
+
+       if (changed & NETIF_F_RXCSUM)
+               ret = bcm_sysport_set_rx_csum(dev, wanted);
+       if (changed & (NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM))
+               ret = bcm_sysport_set_tx_csum(dev, wanted);
+
+       return ret;
+}
+
+/* Hardware counters must be kept in sync because the order/offset
+ * is important here (order in structure declaration = order in hardware)
+ */
+static const struct bcm_sysport_stats bcm_sysport_gstrings_stats[] = {
+       /* general stats */
+       STAT_NETDEV(rx_packets),
+       STAT_NETDEV(tx_packets),
+       STAT_NETDEV(rx_bytes),
+       STAT_NETDEV(tx_bytes),
+       STAT_NETDEV(rx_errors),
+       STAT_NETDEV(tx_errors),
+       STAT_NETDEV(rx_dropped),
+       STAT_NETDEV(tx_dropped),
+       STAT_NETDEV(multicast),
+       /* UniMAC RSV counters */
+       STAT_MIB_RX("rx_64_octets", mib.rx.pkt_cnt.cnt_64),
+       STAT_MIB_RX("rx_65_127_oct", mib.rx.pkt_cnt.cnt_127),
+       STAT_MIB_RX("rx_128_255_oct", mib.rx.pkt_cnt.cnt_255),
+       STAT_MIB_RX("rx_256_511_oct", mib.rx.pkt_cnt.cnt_511),
+       STAT_MIB_RX("rx_512_1023_oct", mib.rx.pkt_cnt.cnt_1023),
+       STAT_MIB_RX("rx_1024_1518_oct", mib.rx.pkt_cnt.cnt_1518),
+       STAT_MIB_RX("rx_vlan_1519_1522_oct", mib.rx.pkt_cnt.cnt_mgv),
+       STAT_MIB_RX("rx_1522_2047_oct", mib.rx.pkt_cnt.cnt_2047),
+       STAT_MIB_RX("rx_2048_4095_oct", mib.rx.pkt_cnt.cnt_4095),
+       STAT_MIB_RX("rx_4096_9216_oct", mib.rx.pkt_cnt.cnt_9216),
+       STAT_MIB_RX("rx_pkts", mib.rx.pkt),
+       STAT_MIB_RX("rx_bytes", mib.rx.bytes),
+       STAT_MIB_RX("rx_multicast", mib.rx.mca),
+       STAT_MIB_RX("rx_broadcast", mib.rx.bca),
+       STAT_MIB_RX("rx_fcs", mib.rx.fcs),
+       STAT_MIB_RX("rx_control", mib.rx.cf),
+       STAT_MIB_RX("rx_pause", mib.rx.pf),
+       STAT_MIB_RX("rx_unknown", mib.rx.uo),
+       STAT_MIB_RX("rx_align", mib.rx.aln),
+       STAT_MIB_RX("rx_outrange", mib.rx.flr),
+       STAT_MIB_RX("rx_code", mib.rx.cde),
+       STAT_MIB_RX("rx_carrier", mib.rx.fcr),
+       STAT_MIB_RX("rx_oversize", mib.rx.ovr),
+       STAT_MIB_RX("rx_jabber", mib.rx.jbr),
+       STAT_MIB_RX("rx_mtu_err", mib.rx.mtue),
+       STAT_MIB_RX("rx_good_pkts", mib.rx.pok),
+       STAT_MIB_RX("rx_unicast", mib.rx.uc),
+       STAT_MIB_RX("rx_ppp", mib.rx.ppp),
+       STAT_MIB_RX("rx_crc", mib.rx.rcrc),
+       /* UniMAC TSV counters */
+       STAT_MIB_TX("tx_64_octets", mib.tx.pkt_cnt.cnt_64),
+       STAT_MIB_TX("tx_65_127_oct", mib.tx.pkt_cnt.cnt_127),
+       STAT_MIB_TX("tx_128_255_oct", mib.tx.pkt_cnt.cnt_255),
+       STAT_MIB_TX("tx_256_511_oct", mib.tx.pkt_cnt.cnt_511),
+       STAT_MIB_TX("tx_512_1023_oct", mib.tx.pkt_cnt.cnt_1023),
+       STAT_MIB_TX("tx_1024_1518_oct", mib.tx.pkt_cnt.cnt_1518),
+       STAT_MIB_TX("tx_vlan_1519_1522_oct", mib.tx.pkt_cnt.cnt_mgv),
+       STAT_MIB_TX("tx_1522_2047_oct", mib.tx.pkt_cnt.cnt_2047),
+       STAT_MIB_TX("tx_2048_4095_oct", mib.tx.pkt_cnt.cnt_4095),
+       STAT_MIB_TX("tx_4096_9216_oct", mib.tx.pkt_cnt.cnt_9216),
+       STAT_MIB_TX("tx_pkts", mib.tx.pkts),
+       STAT_MIB_TX("tx_multicast", mib.tx.mca),
+       STAT_MIB_TX("tx_broadcast", mib.tx.bca),
+       STAT_MIB_TX("tx_pause", mib.tx.pf),
+       STAT_MIB_TX("tx_control", mib.tx.cf),
+       STAT_MIB_TX("tx_fcs_err", mib.tx.fcs),
+       STAT_MIB_TX("tx_oversize", mib.tx.ovr),
+       STAT_MIB_TX("tx_defer", mib.tx.drf),
+       STAT_MIB_TX("tx_excess_defer", mib.tx.edf),
+       STAT_MIB_TX("tx_single_col", mib.tx.scl),
+       STAT_MIB_TX("tx_multi_col", mib.tx.mcl),
+       STAT_MIB_TX("tx_late_col", mib.tx.lcl),
+       STAT_MIB_TX("tx_excess_col", mib.tx.ecl),
+       STAT_MIB_TX("tx_frags", mib.tx.frg),
+       STAT_MIB_TX("tx_total_col", mib.tx.ncl),
+       STAT_MIB_TX("tx_jabber", mib.tx.jbr),
+       STAT_MIB_TX("tx_bytes", mib.tx.bytes),
+       STAT_MIB_TX("tx_good_pkts", mib.tx.pok),
+       STAT_MIB_TX("tx_unicast", mib.tx.uc),
+       /* UniMAC RUNT counters */
+       STAT_RUNT("rx_runt_pkts", mib.rx_runt_cnt),
+       STAT_RUNT("rx_runt_valid_fcs", mib.rx_runt_fcs),
+       STAT_RUNT("rx_runt_inval_fcs_align", mib.rx_runt_fcs_align),
+       STAT_RUNT("rx_runt_bytes", mib.rx_runt_bytes),
+       /* RXCHK misc statistics */
+       STAT_RXCHK("rxchk_bad_csum", mib.rxchk_bad_csum, RXCHK_BAD_CSUM_CNTR),
+       STAT_RXCHK("rxchk_other_pkt_disc", mib.rxchk_other_pkt_disc,
+                       RXCHK_OTHER_DISC_CNTR),
+       /* RBUF misc statistics */
+       STAT_RBUF("rbuf_ovflow_cnt", mib.rbuf_ovflow_cnt, RBUF_OVFL_DISC_CNTR),
+       STAT_RBUF("rbuf_err_cnt", mib.rbuf_err_cnt, RBUF_ERR_PKT_CNTR),
+};
+
+#define BCM_SYSPORT_STATS_LEN  ARRAY_SIZE(bcm_sysport_gstrings_stats)
+
+static void bcm_sysport_get_drvinfo(struct net_device *dev,
+                                       struct ethtool_drvinfo *info)
+{
+       strlcpy(info->driver, KBUILD_MODNAME, sizeof(info->driver));
+       strlcpy(info->version, "0.1", sizeof(info->version));
+       strlcpy(info->bus_info, "platform", sizeof(info->bus_info));
+       info->n_stats = BCM_SYSPORT_STATS_LEN;
+}
+
+static u32 bcm_sysport_get_msglvl(struct net_device *dev)
+{
+       struct bcm_sysport_priv *priv = netdev_priv(dev);
+
+       return priv->msg_enable;
+}
+
+static void bcm_sysport_set_msglvl(struct net_device *dev, u32 enable)
+{
+       struct bcm_sysport_priv *priv = netdev_priv(dev);
+
+       priv->msg_enable = enable;
+}
+
+static int bcm_sysport_get_sset_count(struct net_device *dev, int string_set)
+{
+       switch (string_set) {
+       case ETH_SS_STATS:
+               return BCM_SYSPORT_STATS_LEN;
+       default:
+               return -EOPNOTSUPP;
+       }
+}
+
+static void bcm_sysport_get_strings(struct net_device *dev,
+                                       u32 stringset, u8 *data)
+{
+       int i;
+
+       switch (stringset) {
+       case ETH_SS_STATS:
+               for (i = 0; i < BCM_SYSPORT_STATS_LEN; i++) {
+                       memcpy(data + i * ETH_GSTRING_LEN,
+                               bcm_sysport_gstrings_stats[i].stat_string,
+                               ETH_GSTRING_LEN);
+               }
+               break;
+       default:
+               break;
+       }
+}
+
+static void bcm_sysport_update_mib_counters(struct bcm_sysport_priv *priv)
+{
+       int i, j = 0;
+
+       for (i = 0; i < BCM_SYSPORT_STATS_LEN; i++) {
+               const struct bcm_sysport_stats *s;
+               u8 offset = 0;
+               u32 val = 0;
+               char *p;
+
+               s = &bcm_sysport_gstrings_stats[i];
+               switch (s->type) {
+               case BCM_SYSPORT_STAT_NETDEV:
+                       continue;
+               case BCM_SYSPORT_STAT_MIB_RX:
+               case BCM_SYSPORT_STAT_MIB_TX:
+               case BCM_SYSPORT_STAT_RUNT:
+                       if (s->type != BCM_SYSPORT_STAT_MIB_RX)
+                               offset = UMAC_MIB_STAT_OFFSET;
+                       val = umac_readl(priv, UMAC_MIB_START + j + offset);
+                       break;
+               case BCM_SYSPORT_STAT_RXCHK:
+                       val = rxchk_readl(priv, s->reg_offset);
+                       if (val == ~0)
+                               rxchk_writel(priv, 0, s->reg_offset);
+                       break;
+               case BCM_SYSPORT_STAT_RBUF:
+                       val = rbuf_readl(priv, s->reg_offset);
+                       if (val == ~0)
+                               rbuf_writel(priv, 0, s->reg_offset);
+                       break;
+               }
+
+               j += s->stat_sizeof;
+               p = (char *)priv + s->stat_offset;
+               *(u32 *)p = val;
+       }
+
+       netif_dbg(priv, hw, priv->netdev, "updated MIB counters\n");
+}
+
+static void bcm_sysport_get_stats(struct net_device *dev,
+                                       struct ethtool_stats *stats, u64 *data)
+{
+       struct bcm_sysport_priv *priv = netdev_priv(dev);
+       int i;
+
+       if (netif_running(dev))
+               bcm_sysport_update_mib_counters(priv);
+
+       for (i =  0; i < BCM_SYSPORT_STATS_LEN; i++) {
+               const struct bcm_sysport_stats *s;
+               char *p;
+
+               s = &bcm_sysport_gstrings_stats[i];
+               if (s->type == BCM_SYSPORT_STAT_NETDEV)
+                       p = (char *)&dev->stats;
+               else
+                       p = (char *)priv;
+               p += s->stat_offset;
+               data[i] = *(u32 *)p;
+       }
+}
+
+static void bcm_sysport_free_cb(struct bcm_sysport_cb *cb)
+{
+       dev_kfree_skb_any(cb->skb);
+       cb->skb = NULL;
+       dma_unmap_addr_set(cb, dma_addr, 0);
+}
+
+static int bcm_sysport_rx_refill(struct bcm_sysport_priv *priv,
+                                struct bcm_sysport_cb *cb)
+{
+       struct device *kdev = &priv->pdev->dev;
+       struct net_device *ndev = priv->netdev;
+       dma_addr_t mapping;
+       int ret;
+
+       cb->skb = netdev_alloc_skb(priv->netdev, RX_BUF_LENGTH);
+       if (!cb->skb) {
+               netif_err(priv, rx_err, ndev, "SKB alloc failed\n");
+               return -ENOMEM;
+       }
+
+       mapping = dma_map_single(kdev, cb->skb->data,
+                               RX_BUF_LENGTH, DMA_FROM_DEVICE);
+       ret = dma_mapping_error(kdev, mapping);
+       if (ret) {
+               bcm_sysport_free_cb(cb);
+               netif_err(priv, rx_err, ndev, "DMA mapping failure\n");
+               return ret;
+       }
+
+       dma_unmap_addr_set(cb, dma_addr, mapping);
+       dma_desc_set_addr(priv, priv->rx_bd_assign_ptr, mapping);
+
+       priv->rx_bd_assign_index++;
+       priv->rx_bd_assign_index &= (priv->num_rx_bds - 1);
+       priv->rx_bd_assign_ptr = priv->rx_bds +
+               (priv->rx_bd_assign_index * DESC_SIZE);
+
+       netif_dbg(priv, rx_status, ndev, "RX refill\n");
+
+       return 0;
+}
+
+static int bcm_sysport_alloc_rx_bufs(struct bcm_sysport_priv *priv)
+{
+       struct bcm_sysport_cb *cb;
+       int ret = 0;
+       unsigned int i;
+
+       for (i = 0; i < priv->num_rx_bds; i++) {
+               cb = &priv->rx_cbs[priv->rx_bd_assign_index];
+               if (cb->skb)
+                       continue;
+
+               ret = bcm_sysport_rx_refill(priv, cb);
+               if (ret)
+                       break;
+       }
+
+       return ret;
+}
+
+/* Poll the hardware for up to budget packets to process */
+static unsigned int bcm_sysport_desc_rx(struct bcm_sysport_priv *priv,
+                                       unsigned int budget)
+{
+       struct device *kdev = &priv->pdev->dev;
+       struct net_device *ndev = priv->netdev;
+       unsigned int processed = 0, to_process;
+       struct bcm_sysport_cb *cb;
+       struct sk_buff *skb;
+       unsigned int p_index;
+       u16 len, status;
+       struct rsb *rsb;
+
+       /* Determine how much we should process since last call */
+       p_index = rdma_readl(priv, RDMA_PROD_INDEX);
+       p_index &= RDMA_PROD_INDEX_MASK;
+
+       if (p_index < priv->rx_c_index)
+               to_process = (RDMA_CONS_INDEX_MASK + 1) -
+                       priv->rx_c_index + p_index;
+       else
+               to_process = p_index - priv->rx_c_index;
+
+       netif_dbg(priv, rx_status, ndev,
+                       "p_index=%d rx_c_index=%d to_process=%d\n",
+                       p_index, priv->rx_c_index, to_process);
+
+       while ((processed < to_process) &&
+               (processed < budget)) {
+
+               cb = &priv->rx_cbs[priv->rx_read_ptr];
+               skb = cb->skb;
+               dma_unmap_single(kdev, dma_unmap_addr(cb, dma_addr),
+                               RX_BUF_LENGTH, DMA_FROM_DEVICE);
+
+               /* Extract the Receive Status Block prepended */
+               rsb = (struct rsb *)skb->data;
+               len = (rsb->rx_status_len >> DESC_LEN_SHIFT) & DESC_LEN_MASK;
+               status = (rsb->rx_status_len >> DESC_STATUS_SHIFT) &
+                       DESC_STATUS_MASK;
+
+               processed++;
+               priv->rx_read_ptr++;
+               if (priv->rx_read_ptr == priv->num_rx_bds)
+                       priv->rx_read_ptr = 0;
+
+               netif_dbg(priv, rx_status, ndev,
+                               "p=%d, c=%d, rd_ptr=%d, len=%d, flag=0x%04x\n",
+                               p_index, priv->rx_c_index, priv->rx_read_ptr,
+                               len, status);
+
+               if (unlikely(!skb)) {
+                       netif_err(priv, rx_err, ndev, "out of memory!\n");
+                       ndev->stats.rx_dropped++;
+                       ndev->stats.rx_errors++;
+                       goto refill;
+               }
+
+               if (unlikely(!(status & DESC_EOP) || !(status & DESC_SOP))) {
+                       netif_err(priv, rx_status, ndev, "fragmented packet!\n");
+                       ndev->stats.rx_dropped++;
+                       ndev->stats.rx_errors++;
+                       bcm_sysport_free_cb(cb);
+                       goto refill;
+               }
+
+               if (unlikely(status & (RX_STATUS_ERR | RX_STATUS_OVFLOW))) {
+                       netif_err(priv, rx_err, ndev, "error packet\n");
+                       if (RX_STATUS_OVFLOW)
+                               ndev->stats.rx_over_errors++;
+                       ndev->stats.rx_dropped++;
+                       ndev->stats.rx_errors++;
+                       bcm_sysport_free_cb(cb);
+                       goto refill;
+               }
+
+               skb_put(skb, len);
+
+               /* Hardware validated our checksum */
+               if (likely(status & DESC_L4_CSUM))
+                       skb->ip_summed = CHECKSUM_UNNECESSARY;
+
+               /* Hardware pre-pends packets with 2bytes between Ethernet
+                * and IP header plus we have the Receive Status Block, strip
+                * off all of this from the SKB.
+                */
+               skb_pull(skb, sizeof(*rsb) + 2);
+               len -= (sizeof(*rsb) + 2);
+
+               /* UniMAC may forward CRC */
+               if (priv->crc_fwd) {
+                       skb_trim(skb, len - ETH_FCS_LEN);
+                       len -= ETH_FCS_LEN;
+               }
+
+               skb->protocol = eth_type_trans(skb, ndev);
+               ndev->stats.rx_packets++;
+               ndev->stats.rx_bytes += len;
+
+               napi_gro_receive(&priv->napi, skb);
+refill:
+               bcm_sysport_rx_refill(priv, cb);
+       }
+
+       return processed;
+}
+
+static void bcm_sysport_tx_reclaim_one(struct bcm_sysport_priv *priv,
+                                       struct bcm_sysport_cb *cb,
+                                       unsigned int *bytes_compl,
+                                       unsigned int *pkts_compl)
+{
+       struct device *kdev = &priv->pdev->dev;
+       struct net_device *ndev = priv->netdev;
+
+       if (cb->skb) {
+               ndev->stats.tx_bytes += cb->skb->len;
+               *bytes_compl += cb->skb->len;
+               dma_unmap_single(kdev, dma_unmap_addr(cb, dma_addr),
+                               dma_unmap_len(cb, dma_len),
+                               DMA_TO_DEVICE);
+               ndev->stats.tx_packets++;
+               (*pkts_compl)++;
+               bcm_sysport_free_cb(cb);
+       /* SKB fragment */
+       } else if (dma_unmap_addr(cb, dma_addr)) {
+               ndev->stats.tx_bytes += dma_unmap_len(cb, dma_len);
+               dma_unmap_page(kdev, dma_unmap_addr(cb, dma_addr),
+                               dma_unmap_len(cb, dma_len), DMA_TO_DEVICE);
+               dma_unmap_addr_set(cb, dma_addr, 0);
+       }
+}
+
+/* Reclaim queued SKBs for transmission completion, lockless version */
+static unsigned int __bcm_sysport_tx_reclaim(struct bcm_sysport_priv *priv,
+                                            struct bcm_sysport_tx_ring *ring)
+{
+       struct net_device *ndev = priv->netdev;
+       unsigned int c_index, last_c_index, last_tx_cn, num_tx_cbs;
+       unsigned int pkts_compl = 0, bytes_compl = 0;
+       struct bcm_sysport_cb *cb;
+       struct netdev_queue *txq;
+       u32 hw_ind;
+
+       txq = netdev_get_tx_queue(ndev, ring->index);
+
+       /* Compute how many descriptors have been processed since last call */
+       hw_ind = tdma_readl(priv, TDMA_DESC_RING_PROD_CONS_INDEX(ring->index));
+       c_index = (hw_ind >> RING_CONS_INDEX_SHIFT) & RING_CONS_INDEX_MASK;
+       ring->p_index = (hw_ind & RING_PROD_INDEX_MASK);
+
+       last_c_index = ring->c_index;
+       num_tx_cbs = ring->size;
+
+       c_index &= (num_tx_cbs - 1);
+
+       if (c_index >= last_c_index)
+               last_tx_cn = c_index - last_c_index;
+       else
+               last_tx_cn = num_tx_cbs - last_c_index + c_index;
+
+       netif_dbg(priv, tx_done, ndev,
+                       "ring=%d c_index=%d last_tx_cn=%d last_c_index=%d\n",
+                       ring->index, c_index, last_tx_cn, last_c_index);
+
+       while (last_tx_cn-- > 0) {
+               cb = ring->cbs + last_c_index;
+               bcm_sysport_tx_reclaim_one(priv, cb, &bytes_compl, &pkts_compl);
+
+               ring->desc_count++;
+               last_c_index++;
+               last_c_index &= (num_tx_cbs - 1);
+       }
+
+       ring->c_index = c_index;
+
+       if (netif_tx_queue_stopped(txq) && pkts_compl)
+               netif_tx_wake_queue(txq);
+
+       netif_dbg(priv, tx_done, ndev,
+                       "ring=%d c_index=%d pkts_compl=%d, bytes_compl=%d\n",
+                       ring->index, ring->c_index, pkts_compl, bytes_compl);
+
+       return pkts_compl;
+}
+
+/* Locked version of the per-ring TX reclaim routine; takes the ring
+ * spinlock to serialize against the transmit path, which posts new
+ * descriptors under the same lock.
+ */
+static unsigned int bcm_sysport_tx_reclaim(struct bcm_sysport_priv *priv,
+                                          struct bcm_sysport_tx_ring *ring)
+{
+       unsigned int reclaimed;
+
+       spin_lock(&ring->lock);
+       reclaimed = __bcm_sysport_tx_reclaim(priv, ring);
+       spin_unlock(&ring->lock);
+
+       return reclaimed;
+}
+
+/* Per-TX-ring NAPI poll callback: reclaim completed descriptors and
+ * re-enable this ring's interrupt once the ring has been drained below
+ * the budget.
+ */
+static int bcm_sysport_tx_poll(struct napi_struct *napi, int budget)
+{
+       struct bcm_sysport_tx_ring *ring =
+               container_of(napi, struct bcm_sysport_tx_ring, napi);
+       unsigned int work_done = 0;
+
+       work_done = bcm_sysport_tx_reclaim(ring->priv, ring);
+
+       /* NOTE(review): the reclaim count can exceed the NAPI weight (the
+        * ring holds up to 256 descriptors, the weight is 64); returning
+        * more than budget violates the NAPI contract -- confirm and clamp
+        * if so
+        */
+       if (work_done < budget) {
+               napi_complete(napi);
+               /* re-enable TX interrupt */
+               intrl2_1_mask_clear(ring->priv, BIT(ring->index));
+       }
+
+       return work_done;
+}
+
+/* Reclaim completed TX descriptors on every transmit ring; used when we
+ * cannot tell which ring triggered the interrupt
+ */
+static void bcm_sysport_tx_reclaim_all(struct bcm_sysport_priv *priv)
+{
+       struct net_device *ndev = priv->netdev;
+       unsigned int i;
+
+       for (i = 0; i < ndev->num_tx_queues; i++)
+               bcm_sysport_tx_reclaim(priv, &priv->tx_rings[i]);
+}
+
+/* Main (RX) NAPI poll callback: process up to budget received packets,
+ * publish the updated consumer index to the RDMA engine and re-enable
+ * the RX interrupt when the budget was not exhausted.
+ */
+static int bcm_sysport_poll(struct napi_struct *napi, int budget)
+{
+       struct bcm_sysport_priv *priv =
+               container_of(napi, struct bcm_sysport_priv, napi);
+       unsigned int work_done = 0;
+
+       work_done = bcm_sysport_desc_rx(priv, budget);
+
+       /* Tell the hardware how far we have consumed */
+       priv->rx_c_index += work_done;
+       priv->rx_c_index &= RDMA_CONS_INDEX_MASK;
+       rdma_writel(priv, priv->rx_c_index, RDMA_CONS_INDEX);
+
+       if (work_done < budget) {
+               napi_complete(napi);
+               /* re-enable RX interrupts */
+               intrl2_0_mask_clear(priv, INTRL2_0_RDMA_MBDONE);
+       }
+
+       return work_done;
+}
+
+
+/* RX and misc interrupt routine (intrl2_0 line).
+ * Latches the active, unmasked status bits, acknowledges them, then:
+ *  - schedules the RX NAPI context (with the RX interrupt masked) on
+ *    RDMA_MBDONE;
+ *  - performs a full TX reclaim on TX_RING_FULL, since this source does
+ *    not identify the ring.
+ */
+static irqreturn_t bcm_sysport_rx_isr(int irq, void *dev_id)
+{
+       struct net_device *dev = dev_id;
+       struct bcm_sysport_priv *priv = netdev_priv(dev);
+
+       /* Only consider sources that are both raised and unmasked */
+       priv->irq0_stat = intrl2_0_readl(priv, INTRL2_CPU_STATUS) &
+                         ~intrl2_0_readl(priv, INTRL2_CPU_MASK_STATUS);
+       intrl2_0_writel(priv, priv->irq0_stat, INTRL2_CPU_CLEAR);
+
+       if (unlikely(priv->irq0_stat == 0)) {
+               netdev_warn(priv->netdev, "spurious RX interrupt\n");
+               return IRQ_NONE;
+       }
+
+       if (priv->irq0_stat & INTRL2_0_RDMA_MBDONE) {
+               if (likely(napi_schedule_prep(&priv->napi))) {
+                       /* disable RX interrupts */
+                       intrl2_0_mask_set(priv, INTRL2_0_RDMA_MBDONE);
+                       __napi_schedule(&priv->napi);
+               }
+       }
+
+       /* TX ring is full, perform a full reclaim since we do not know
+        * which one would trigger this interrupt
+        */
+       if (priv->irq0_stat & INTRL2_0_TX_RING_FULL)
+               bcm_sysport_tx_reclaim_all(priv);
+
+       return IRQ_HANDLED;
+}
+
+/* TX interrupt service routine (intrl2_1 line); one status bit per TX
+ * ring. For every raised ring, mask its interrupt and schedule the
+ * per-ring NAPI context which does the actual reclaim.
+ */
+static irqreturn_t bcm_sysport_tx_isr(int irq, void *dev_id)
+{
+       struct net_device *dev = dev_id;
+       struct bcm_sysport_priv *priv = netdev_priv(dev);
+       struct bcm_sysport_tx_ring *txr;
+       unsigned int ring;
+
+       priv->irq1_stat = intrl2_1_readl(priv, INTRL2_CPU_STATUS) &
+                               ~intrl2_1_readl(priv, INTRL2_CPU_MASK_STATUS);
+       /* Acknowledge everything; only the latched irq1_stat is acted on */
+       intrl2_1_writel(priv, 0xffffffff, INTRL2_CPU_CLEAR);
+
+       if (unlikely(priv->irq1_stat == 0)) {
+               netdev_warn(priv->netdev, "spurious TX interrupt\n");
+               return IRQ_NONE;
+       }
+
+       for (ring = 0; ring < dev->num_tx_queues; ring++) {
+               if (!(priv->irq1_stat & BIT(ring)))
+                       continue;
+
+               txr = &priv->tx_rings[ring];
+
+               if (likely(napi_schedule_prep(&txr->napi))) {
+                       /* keep this ring's interrupt masked until its NAPI
+                        * poll has drained it
+                        */
+                       intrl2_1_mask_set(priv, BIT(ring));
+                       __napi_schedule(&txr->napi);
+               }
+       }
+
+       return IRQ_HANDLED;
+}
+
+/* Insert an 8-byte transmit status block (TSB) in front of the frame and
+ * fill in its L4 checksum offload fields when the stack requested
+ * hardware checksumming (CHECKSUM_PARTIAL).
+ * Returns 0 on success or a negative errno.
+ */
+static int bcm_sysport_insert_tsb(struct sk_buff *skb, struct net_device *dev)
+{
+       struct sk_buff *nskb;
+       struct tsb *tsb;
+       u32 csum_info;
+       u8 ip_proto;
+       u16 csum_start;
+       u16 ip_ver;
+
+       /* Re-allocate SKB if needed */
+       if (unlikely(skb_headroom(skb) < sizeof(*tsb))) {
+               nskb = skb_realloc_headroom(skb, sizeof(*tsb));
+               /* NOTE(review): the original skb is freed here but the
+                * replacement is only stored in this function's local 'skb';
+                * the caller keeps using its own (now freed) pointer --
+                * confirm, this looks like a use-after-free that needs the
+                * function to return the new skb
+                */
+               dev_kfree_skb(skb);
+               if (!nskb) {
+                       dev->stats.tx_errors++;
+                       dev->stats.tx_dropped++;
+                       return -ENOMEM;
+               }
+               skb = nskb;
+       }
+
+       tsb = (struct tsb *)skb_push(skb, sizeof(*tsb));
+       /* Zero-out TSB by default */
+       memset(tsb, 0, sizeof(*tsb));
+
+       if (skb->ip_summed == CHECKSUM_PARTIAL) {
+               /* skb->protocol is big-endian; htons() produces the same
+                * value as ntohs() so this works, but ntohs() would be the
+                * semantically correct conversion
+                */
+               ip_ver = htons(skb->protocol);
+               switch (ip_ver) {
+               case ETH_P_IP:
+                       ip_proto = ip_hdr(skb)->protocol;
+                       break;
+               case ETH_P_IPV6:
+                       ip_proto = ipv6_hdr(skb)->nexthdr;
+                       break;
+               default:
+                       /* Not an offloadable protocol; leave the TSB zeroed */
+                       return 0;
+               }
+
+               /* Get the checksum offset and the L4 (transport) offset;
+                * offsets are relative to the frame after the TSB push,
+                * hence the sizeof(*tsb) adjustment
+                */
+               csum_start = skb_checksum_start_offset(skb) - sizeof(*tsb);
+               csum_info = (csum_start + skb->csum_offset) & L4_CSUM_PTR_MASK;
+               csum_info |= (csum_start << L4_PTR_SHIFT);
+
+               if (ip_proto == IPPROTO_TCP || ip_proto == IPPROTO_UDP) {
+                       csum_info |= L4_LENGTH_VALID;
+                       if (ip_proto == IPPROTO_UDP && ip_ver == ETH_P_IP)
+                               csum_info |= L4_UDP;
+               } else
+                       csum_info = 0;
+
+               tsb->l4_ptr_dest_map = csum_info;
+       }
+
+       return 0;
+}
+
+/* ndo_start_xmit callback: optionally insert a TSB, DMA-map the frame,
+ * build a single descriptor and hand it to the TDMA engine through the
+ * ring write port. Runs under the per-ring spinlock to serialize against
+ * TX reclaim running in BH context.
+ */
+static netdev_tx_t bcm_sysport_xmit(struct sk_buff *skb,
+                                   struct net_device *dev)
+{
+       struct bcm_sysport_priv *priv = netdev_priv(dev);
+       struct device *kdev = &priv->pdev->dev;
+       struct bcm_sysport_tx_ring *ring;
+       struct bcm_sysport_cb *cb;
+       struct netdev_queue *txq;
+       struct dma_desc *desc;
+       unsigned int skb_len;
+       dma_addr_t mapping;
+       u32 len_status;
+       u16 queue;
+       int ret;
+
+       queue = skb_get_queue_mapping(skb);
+       txq = netdev_get_tx_queue(dev, queue);
+       ring = &priv->tx_rings[queue];
+
+       /* lock against tx reclaim in BH context */
+       spin_lock(&ring->lock);
+       if (unlikely(ring->desc_count == 0)) {
+               netif_tx_stop_queue(txq);
+               netdev_err(dev, "queue %d awake and ring full!\n", queue);
+               ret = NETDEV_TX_BUSY;
+               goto out;
+       }
+
+       /* Insert TSB and checksum infos */
+       if (priv->tsb_en) {
+               /* NOTE(review): bcm_sysport_insert_tsb() may reallocate and
+                * free the skb without this function seeing the replacement
+                * pointer; the accesses below would then touch freed memory
+                * -- confirm
+                */
+               ret = bcm_sysport_insert_tsb(skb, dev);
+               if (ret) {
+                       ret = NETDEV_TX_OK;
+                       goto out;
+               }
+       }
+
+       /* The Ethernet switch we are interfaced with needs packets to be at
+        * least 64 bytes (including FCS) otherwise they will be discarded when
+        * they enter the switch port logic. When Broadcom tags are enabled, we
+        * need to make sure that packets are at least 68 bytes
+        * (including FCS and tag) because the length verification is done after
+        * the Broadcom tag is stripped off the ingress packet.
+        */
+       if (skb_padto(skb, ETH_ZLEN + ENET_BRCM_TAG_LEN)) {
+               ret = NETDEV_TX_OK;
+               goto out;
+       }
+
+       /* skb_padto() pads the buffer without updating skb->len, hence the
+        * explicit minimum-length computation for the DMA mapping
+        */
+       skb_len = skb->len < ETH_ZLEN + ENET_BRCM_TAG_LEN ?
+                       ETH_ZLEN + ENET_BRCM_TAG_LEN : skb->len;
+
+       mapping = dma_map_single(kdev, skb->data, skb_len, DMA_TO_DEVICE);
+       if (dma_mapping_error(kdev, mapping)) {
+               netif_err(priv, tx_err, dev, "DMA map failed at %p (len=%d)\n",
+                               skb->data, skb_len);
+               ret = NETDEV_TX_OK;
+               goto out;
+       }
+
+       /* Remember the SKB for future freeing */
+       cb = &ring->cbs[ring->curr_desc];
+       cb->skb = skb;
+       dma_unmap_addr_set(cb, dma_addr, mapping);
+       dma_unmap_len_set(cb, dma_len, skb_len);
+
+       /* Fetch a descriptor entry from our pool */
+       desc = ring->desc_cpu;
+
+       /* Single-buffer frame: SOP and EOP on the same descriptor */
+       desc->addr_lo = lower_32_bits(mapping);
+       len_status = upper_32_bits(mapping) & DESC_ADDR_HI_MASK;
+       len_status |= (skb_len << DESC_LEN_SHIFT);
+       len_status |= (DESC_SOP | DESC_EOP | TX_STATUS_APP_CRC) <<
+                       DESC_STATUS_SHIFT;
+       if (skb->ip_summed == CHECKSUM_PARTIAL)
+               len_status |= (DESC_L4_CSUM << DESC_STATUS_SHIFT);
+
+       ring->curr_desc++;
+       if (ring->curr_desc == ring->size)
+               ring->curr_desc = 0;
+       ring->desc_count--;
+
+       /* Ensure write completion of the descriptor status/length
+        * in DRAM before the System Port WRITE_PORT register latches
+        * the value
+        */
+       wmb();
+       desc->addr_status_len = len_status;
+       wmb();
+
+       /* Write this descriptor address to the RING write port */
+       tdma_port_write_desc_addr(priv, desc, ring->index);
+
+       /* Check ring space and update SW control flow */
+       if (ring->desc_count == 0)
+               netif_tx_stop_queue(txq);
+
+       netif_dbg(priv, tx_queued, dev, "ring=%d desc_count=%d, curr_desc=%d\n",
+                       ring->index, ring->desc_count, ring->curr_desc);
+
+       ret = NETDEV_TX_OK;
+out:
+       spin_unlock(&ring->lock);
+       return ret;
+}
+
+/* ndo_tx_timeout callback: log the event, rearm the watchdog timestamp,
+ * account a TX error and kick all queues awake again.
+ */
+static void bcm_sysport_tx_timeout(struct net_device *dev)
+{
+       netdev_warn(dev, "transmit timeout!\n");
+
+       dev->trans_start = jiffies;
+       dev->stats.tx_errors++;
+
+       netif_tx_wake_all_queues(dev);
+}
+
+/* phylib adjust link callback: mirror the PHY-negotiated link, speed,
+ * duplex and pause settings into the UniMAC command register; only touch
+ * the hardware when something actually changed.
+ */
+static void bcm_sysport_adj_link(struct net_device *dev)
+{
+       struct bcm_sysport_priv *priv = netdev_priv(dev);
+       struct phy_device *phydev = priv->phydev;
+       unsigned int changed = 0;
+       u32 cmd_bits = 0, reg;
+
+       if (priv->old_link != phydev->link) {
+               changed = 1;
+               priv->old_link = phydev->link;
+       }
+
+       if (priv->old_duplex != phydev->duplex) {
+               changed = 1;
+               priv->old_duplex = phydev->duplex;
+       }
+
+       /* Map the PHY speed to UniMAC command bits; an unknown speed leaves
+        * cmd_bits at 0
+        */
+       switch (phydev->speed) {
+       case SPEED_2500:
+               cmd_bits = CMD_SPEED_2500;
+               break;
+       case SPEED_1000:
+               cmd_bits = CMD_SPEED_1000;
+               break;
+       case SPEED_100:
+               cmd_bits = CMD_SPEED_100;
+               break;
+       case SPEED_10:
+               cmd_bits = CMD_SPEED_10;
+               break;
+       default:
+               break;
+       }
+       cmd_bits <<= CMD_SPEED_SHIFT;
+
+       if (phydev->duplex == DUPLEX_HALF)
+               cmd_bits |= CMD_HD_EN;
+
+       if (priv->old_pause != phydev->pause) {
+               changed = 1;
+               priv->old_pause = phydev->pause;
+       }
+
+       if (!phydev->pause)
+               cmd_bits |= CMD_RX_PAUSE_IGNORE | CMD_TX_PAUSE_IGNORE;
+
+       if (changed) {
+               /* Replace the speed/duplex/pause fields wholesale, keep the
+                * other command register bits untouched
+                */
+               reg = umac_readl(priv, UMAC_CMD);
+               reg &= ~((CMD_SPEED_MASK << CMD_SPEED_SHIFT) |
+                       CMD_HD_EN | CMD_RX_PAUSE_IGNORE |
+                       CMD_TX_PAUSE_IGNORE);
+               reg |= cmd_bits;
+               umac_writel(priv, reg, UMAC_CMD);
+
+               phy_print_status(priv->phydev);
+       }
+}
+
+/* Allocate and program one TX ring: a single coherent "bounce"
+ * descriptor (the port maintains its own internal linked-list, so one
+ * DMA-able descriptor suffices), the software control-block array, the
+ * per-ring NAPI context and the TDMA registers.
+ * Returns 0 on success or a negative errno; error paths free everything
+ * allocated here.
+ */
+static int bcm_sysport_init_tx_ring(struct bcm_sysport_priv *priv,
+                                   unsigned int index)
+{
+       struct bcm_sysport_tx_ring *ring = &priv->tx_rings[index];
+       struct device *kdev = &priv->pdev->dev;
+       size_t size;
+       void *p;
+       u32 reg;
+
+       /* Simple descriptors partitioning for now */
+       size = 256;
+
+       /* We just need one DMA descriptor which is DMA-able, since writing to
+        * the port will allocate a new descriptor in its internal linked-list
+        */
+       p = dma_zalloc_coherent(kdev, 1, &ring->desc_dma, GFP_KERNEL);
+       if (!p) {
+               netif_err(priv, hw, priv->netdev, "DMA alloc failed\n");
+               return -ENOMEM;
+       }
+
+       ring->cbs = kzalloc(sizeof(struct bcm_sysport_cb) * size, GFP_KERNEL);
+       if (!ring->cbs) {
+               netif_err(priv, hw, priv->netdev, "CB allocation failed\n");
+               /* Do not leak the coherent descriptor allocated above; clear
+                * desc_dma so a later ring teardown does not free it twice
+                */
+               dma_free_coherent(kdev, 1, p, ring->desc_dma);
+               ring->desc_dma = 0;
+               return -ENOMEM;
+       }
+
+       /* Initialize SW view of the ring */
+       spin_lock_init(&ring->lock);
+       ring->priv = priv;
+       netif_napi_add(priv->netdev, &ring->napi, bcm_sysport_tx_poll, 64);
+       ring->index = index;
+       ring->size = size;
+       ring->alloc_size = ring->size;
+       ring->desc_cpu = p;
+       ring->desc_count = ring->size;
+       ring->curr_desc = 0;
+
+       /* Initialize HW ring */
+       tdma_writel(priv, RING_EN, TDMA_DESC_RING_HEAD_TAIL_PTR(index));
+       tdma_writel(priv, 0, TDMA_DESC_RING_COUNT(index));
+       tdma_writel(priv, 1, TDMA_DESC_RING_INTR_CONTROL(index));
+       tdma_writel(priv, 0, TDMA_DESC_RING_PROD_CONS_INDEX(index));
+       tdma_writel(priv, RING_IGNORE_STATUS, TDMA_DESC_RING_MAPPING(index));
+       tdma_writel(priv, 0, TDMA_DESC_RING_PCP_DEI_VID(index));
+
+       /* Program the number of descriptors as MAX_THRESHOLD and half of
+        * its size for the hysteresis trigger
+        */
+       tdma_writel(priv, ring->size |
+                       1 << RING_HYST_THRESH_SHIFT,
+                       TDMA_DESC_RING_MAX_HYST(index));
+
+       /* Enable the ring queue in the arbiter */
+       reg = tdma_readl(priv, TDMA_TIER1_ARB_0_QUEUE_EN);
+       reg |= (1 << index);
+       tdma_writel(priv, reg, TDMA_TIER1_ARB_0_QUEUE_EN);
+
+       napi_enable(&ring->napi);
+
+       netif_dbg(priv, hw, priv->netdev,
+                       "TDMA cfg, size=%d, desc_cpu=%p\n",
+                       ring->size, ring->desc_cpu);
+
+       return 0;
+}
+
+/* Tear down one TX ring: stop its NAPI context, reclaim any outstanding
+ * descriptors and free the software and DMA resources.
+ * The caller must have stopped the TDMA engine first (warned otherwise).
+ */
+static void bcm_sysport_fini_tx_ring(struct bcm_sysport_priv *priv,
+                                       unsigned int index)
+{
+       struct bcm_sysport_tx_ring *ring = &priv->tx_rings[index];
+       struct device *kdev = &priv->pdev->dev;
+       u32 reg;
+
+       /* Caller should stop the TDMA engine */
+       reg = tdma_readl(priv, TDMA_STATUS);
+       if (!(reg & TDMA_DISABLED))
+               netdev_warn(priv->netdev, "TDMA not stopped!\n");
+
+       /* NOTE(review): when called from an error-unwind path this may run
+        * on a ring whose netif_napi_add() never happened -- confirm
+        * napi_disable() is safe there
+        */
+       napi_disable(&ring->napi);
+       netif_napi_del(&ring->napi);
+
+       bcm_sysport_tx_reclaim(priv, ring);
+
+       kfree(ring->cbs);
+       ring->cbs = NULL;
+
+       /* desc_dma != 0 implies the coherent descriptor is still owned */
+       if (ring->desc_dma) {
+               dma_free_coherent(kdev, 1, ring->desc_cpu, ring->desc_dma);
+               ring->desc_dma = 0;
+       }
+       ring->size = 0;
+       ring->alloc_size = 0;
+
+       netif_dbg(priv, hw, priv->netdev, "TDMA fini done\n");
+}
+
+/* RDMA helper: set or clear the receive-DMA enable bit, then poll the
+ * status register until the engine reflects the requested state.
+ * Returns 0 on success or -ETIMEDOUT after ~1000 polls of 1-2ms each.
+ */
+static inline int rdma_enable_set(struct bcm_sysport_priv *priv,
+                                       unsigned int enable)
+{
+       unsigned int timeout = 1000;
+       u32 reg;
+
+       reg = rdma_readl(priv, RDMA_CONTROL);
+       if (enable)
+               reg |= RDMA_EN;
+       else
+               reg &= ~RDMA_EN;
+       rdma_writel(priv, reg, RDMA_CONTROL);
+
+       /* Poll for RMDA disabling completion */
+       do {
+               reg = rdma_readl(priv, RDMA_STATUS);
+               /* DISABLED must read as the inverse of the requested enable */
+               if (!!(reg & RDMA_DISABLED) == !enable)
+                       return 0;
+               usleep_range(1000, 2000);
+       } while (timeout-- > 0);
+
+       netdev_err(priv->netdev, "timeout waiting for RDMA to finish\n");
+
+       return -ETIMEDOUT;
+}
+
+/* TDMA helper: set or clear the transmit-DMA enable bit, then poll the
+ * status register until the engine reflects the requested state.
+ * Returns 0 on success or -ETIMEDOUT after ~1000 polls of 1-2ms each.
+ */
+static inline int tdma_enable_set(struct bcm_sysport_priv *priv,
+                                       unsigned int enable)
+{
+       unsigned int timeout = 1000;
+       u32 reg;
+
+       reg = tdma_readl(priv, TDMA_CONTROL);
+       if (enable)
+               reg |= TDMA_EN;
+       else
+               reg &= ~TDMA_EN;
+       tdma_writel(priv, reg, TDMA_CONTROL);
+
+       /* Poll for TMDA disabling completion */
+       do {
+               reg = tdma_readl(priv, TDMA_STATUS);
+               /* DISABLED must read as the inverse of the requested enable */
+               if (!!(reg & TDMA_DISABLED) == !enable)
+                       return 0;
+
+               usleep_range(1000, 2000);
+       } while (timeout-- > 0);
+
+       netdev_err(priv->netdev, "timeout waiting for TDMA to finish\n");
+
+       return -ETIMEDOUT;
+}
+
+/* Set up the single RX ring: software bookkeeping, control-block array,
+ * RX buffer allocation, then the RDMA registers (ring mode).
+ * Returns 0 on success or a negative errno. The error paths here do not
+ * free rx_cbs themselves; callers unwind via bcm_sysport_fini_rx_ring()
+ * (as bcm_sysport_open() does) -- keep that in mind for new callers.
+ */
+static int bcm_sysport_init_rx_ring(struct bcm_sysport_priv *priv)
+{
+       u32 reg;
+       int ret;
+
+       /* Initialize SW view of the RX ring */
+       priv->num_rx_bds = NUM_RX_DESC;
+       /* RX descriptors live in device registers, not DRAM */
+       priv->rx_bds = priv->base + SYS_PORT_RDMA_OFFSET;
+       priv->rx_bd_assign_ptr = priv->rx_bds;
+       priv->rx_bd_assign_index = 0;
+       priv->rx_c_index = 0;
+       priv->rx_read_ptr = 0;
+       priv->rx_cbs = kzalloc(priv->num_rx_bds *
+                               sizeof(struct bcm_sysport_cb), GFP_KERNEL);
+       if (!priv->rx_cbs) {
+               netif_err(priv, hw, priv->netdev, "CB allocation failed\n");
+               return -ENOMEM;
+       }
+
+       ret = bcm_sysport_alloc_rx_bufs(priv);
+       if (ret) {
+               netif_err(priv, hw, priv->netdev, "SKB allocation failed\n");
+               return ret;
+       }
+
+       /* Initialize HW, ensure RDMA is disabled */
+       reg = rdma_readl(priv, RDMA_STATUS);
+       if (!(reg & RDMA_DISABLED))
+               rdma_enable_set(priv, 0);
+
+       rdma_writel(priv, 0, RDMA_WRITE_PTR_LO);
+       rdma_writel(priv, 0, RDMA_WRITE_PTR_HI);
+       rdma_writel(priv, 0, RDMA_PROD_INDEX);
+       rdma_writel(priv, 0, RDMA_CONS_INDEX);
+       rdma_writel(priv, priv->num_rx_bds << RDMA_RING_SIZE_SHIFT |
+                         RX_BUF_LENGTH, RDMA_RING_BUF_SIZE);
+       /* Operate the queue in ring mode */
+       rdma_writel(priv, 0, RDMA_START_ADDR_HI);
+       rdma_writel(priv, 0, RDMA_START_ADDR_LO);
+       rdma_writel(priv, 0, RDMA_END_ADDR_HI);
+       rdma_writel(priv, NUM_HW_RX_DESC_WORDS - 1, RDMA_END_ADDR_LO);
+
+       rdma_writel(priv, 1, RDMA_MBDONE_INTR);
+
+       netif_dbg(priv, hw, priv->netdev,
+                       "RDMA cfg, num_rx_bds=%d, rx_bds=%p\n",
+                       priv->num_rx_bds, priv->rx_bds);
+
+       return 0;
+}
+
+/* Tear down the RX ring: unmap and free every RX buffer, then free the
+ * control-block array. The caller must have disabled the RDMA engine
+ * first (warned otherwise).
+ */
+static void bcm_sysport_fini_rx_ring(struct bcm_sysport_priv *priv)
+{
+       struct bcm_sysport_cb *cb;
+       unsigned int i;
+       u32 reg;
+
+       /* Caller should ensure RDMA is disabled */
+       reg = rdma_readl(priv, RDMA_STATUS);
+       if (!(reg & RDMA_DISABLED))
+               netdev_warn(priv->netdev, "RDMA not stopped!\n");
+
+       for (i = 0; i < priv->num_rx_bds; i++) {
+               cb = &priv->rx_cbs[i];
+               /* A zero dma_addr marks a CB whose buffer was never mapped */
+               if (dma_unmap_addr(cb, dma_addr))
+                       dma_unmap_single(&priv->pdev->dev,
+                                       dma_unmap_addr(cb, dma_addr),
+                                       RX_BUF_LENGTH, DMA_FROM_DEVICE);
+               bcm_sysport_free_cb(cb);
+       }
+
+       kfree(priv->rx_cbs);
+       priv->rx_cbs = NULL;
+
+       netif_dbg(priv, hw, priv->netdev, "RDMA fini done\n");
+}
+
+/* ndo_set_rx_mode callback: promiscuous mode is the only RX filtering
+ * knob programmable on this MAC; multicast filtering is handled by the
+ * switch the MAC is interfaced to.
+ */
+static void bcm_sysport_set_rx_mode(struct net_device *dev)
+{
+       struct bcm_sysport_priv *priv = netdev_priv(dev);
+       u32 reg;
+
+       reg = umac_readl(priv, UMAC_CMD);
+       if (dev->flags & IFF_PROMISC)
+               reg |= CMD_PROMISC;
+       else
+               reg &= ~CMD_PROMISC;
+       umac_writel(priv, reg, UMAC_CMD);
+
+       /* No support for ALLMULTI */
+       if (dev->flags & IFF_ALLMULTI)
+               return;         /* deliberate no-op, documents the limitation */
+}
+
+/* Enable or disable both UniMAC RX and TX paths in one register update */
+static inline void umac_enable_set(struct bcm_sysport_priv *priv,
+                                       unsigned int enable)
+{
+       u32 reg;
+
+       reg = umac_readl(priv, UMAC_CMD);
+       if (enable)
+               reg |= CMD_RX_EN | CMD_TX_EN;
+       else
+               reg &= ~(CMD_RX_EN | CMD_TX_EN);
+       umac_writel(priv, reg, UMAC_CMD);
+
+       /* UniMAC stops on a packet boundary, wait for a full-sized packet
+        * to be processed (1 msec).
+        */
+       if (enable == 0)
+               usleep_range(1000, 2000);
+}
+
+/* Reset the UniMAC block and wait (up to ~1ms) for the software reset
+ * bit to read back as cleared. Returns 0 on success, -ETIMEDOUT on
+ * timeout.
+ */
+static inline int umac_reset(struct bcm_sysport_priv *priv)
+{
+       unsigned int timeout = 0;
+       u32 reg;
+       int ret = 0;
+
+       /* NOTE(review): this writes 0 to UMAC_CMD and then polls for
+        * CMD_SW_RESET to clear, but CMD_SW_RESET is never asserted first;
+        * confirm whether the reset is actually triggered here
+        */
+       umac_writel(priv, 0, UMAC_CMD);
+       while (timeout++ < 1000) {
+               reg = umac_readl(priv, UMAC_CMD);
+               if (!(reg & CMD_SW_RESET))
+                       break;
+
+               udelay(1);
+       }
+
+       if (timeout == 1000) {
+               dev_err(&priv->pdev->dev,
+                       "timeout waiting for MAC to come out of reset\n");
+               ret = -ETIMEDOUT;
+       }
+
+       return ret;
+}
+
+/* Program the station MAC address registers; addr must point to the six
+ * address bytes (most significant byte first)
+ */
+static void umac_set_hw_addr(struct bcm_sysport_priv *priv,
+                               unsigned char *addr)
+{
+       u32 mac0, mac1;
+
+       /* First four bytes in MAC0, last two in the low half of MAC1 */
+       mac0 = (addr[0] << 24) | (addr[1] << 16) | (addr[2] << 8) | addr[3];
+       mac1 = (addr[4] << 8) | addr[5];
+
+       umac_writel(priv, mac0, UMAC_MAC0);
+       umac_writel(priv, mac1, UMAC_MAC1);
+}
+
+/* Flush the RX and TX FIFOs at the TOPCTRL level: assert both flush
+ * controls, give the hardware 1ms, then deassert them.
+ */
+static void topctrl_flush(struct bcm_sysport_priv *priv)
+{
+       topctrl_writel(priv, RX_FLUSH, RX_FLUSH_CNTL);
+       topctrl_writel(priv, TX_FLUSH, TX_FLUSH_CNTL);
+       mdelay(1);
+       topctrl_writel(priv, 0, RX_FLUSH_CNTL);
+       topctrl_writel(priv, 0, TX_FLUSH_CNTL);
+}
+
+/* ndo_open callback: bring the interface up.
+ *
+ * Sequence: reset the UniMAC, flush the TOPCTRL FIFOs, program the MAC
+ * parameters, connect the PHY, request both interrupt lines, set up the
+ * TX and RX rings, then enable RDMA, TDMA, NAPI, the UniMAC and finally
+ * the transmit queues. Errors unwind in reverse order via the labels at
+ * the bottom.
+ */
+static int bcm_sysport_open(struct net_device *dev)
+{
+       struct bcm_sysport_priv *priv = netdev_priv(dev);
+       unsigned int i;
+       u32 reg;
+       int ret;
+
+       /* Reset UniMAC */
+       ret = umac_reset(priv);
+       if (ret) {
+               netdev_err(dev, "UniMAC reset failed\n");
+               return ret;
+       }
+
+       /* Flush TX and RX FIFOs at TOPCTRL level */
+       topctrl_flush(priv);
+
+       /* Disable the UniMAC RX/TX */
+       umac_enable_set(priv, 0);
+
+       /* Enable RBUF alignment (RBUF_4B_ALGN) and Receive Status Block */
+       reg = rbuf_readl(priv, RBUF_CONTROL);
+       reg |= RBUF_4B_ALGN | RBUF_RSB_EN;
+       rbuf_writel(priv, reg, RBUF_CONTROL);
+
+       /* Set maximum frame length */
+       umac_writel(priv, UMAC_MAX_MTU_SIZE, UMAC_MAX_FRAME_LEN);
+
+       /* Set MAC address */
+       umac_set_hw_addr(priv, dev->dev_addr);
+
+       /* Read CRC forward */
+       priv->crc_fwd = !!(umac_readl(priv, UMAC_CMD) & CMD_CRC_FWD);
+
+       priv->phydev = of_phy_connect(dev, priv->phy_dn, bcm_sysport_adj_link,
+                                       0, priv->phy_interface);
+       if (!priv->phydev) {
+               netdev_err(dev, "could not attach to PHY\n");
+               return -ENODEV;
+       }
+
+       /* Reset house keeping link status */
+       priv->old_duplex = -1;
+       priv->old_link = -1;
+       priv->old_pause = -1;
+
+       /* mask all interrupts and request them */
+       intrl2_0_writel(priv, 0xffffffff, INTRL2_CPU_MASK_SET);
+       intrl2_0_writel(priv, 0xffffffff, INTRL2_CPU_CLEAR);
+       intrl2_0_writel(priv, 0, INTRL2_CPU_MASK_CLEAR);
+       intrl2_1_writel(priv, 0xffffffff, INTRL2_CPU_MASK_SET);
+       intrl2_1_writel(priv, 0xffffffff, INTRL2_CPU_CLEAR);
+       intrl2_1_writel(priv, 0, INTRL2_CPU_MASK_CLEAR);
+
+       ret = request_irq(priv->irq0, bcm_sysport_rx_isr, 0, dev->name, dev);
+       if (ret) {
+               netdev_err(dev, "failed to request RX interrupt\n");
+               goto out_phy_disconnect;
+       }
+
+       ret = request_irq(priv->irq1, bcm_sysport_tx_isr, 0, dev->name, dev);
+       if (ret) {
+               netdev_err(dev, "failed to request TX interrupt\n");
+               goto out_free_irq0;
+       }
+
+       /* Initialize both hardware and software ring */
+       for (i = 0; i < dev->num_tx_queues; i++) {
+               ret = bcm_sysport_init_tx_ring(priv, i);
+               if (ret) {
+                       netdev_err(dev, "failed to initialize TX ring %d\n",
+                                       i);
+                       goto out_free_tx_ring;
+               }
+       }
+
+       /* Initialize linked-list */
+       tdma_writel(priv, TDMA_LL_RAM_INIT_BUSY, TDMA_STATUS);
+
+       /* Initialize RX ring */
+       ret = bcm_sysport_init_rx_ring(priv);
+       if (ret) {
+               netdev_err(dev, "failed to initialize RX ring\n");
+               goto out_free_rx_ring;
+       }
+
+       /* Turn on RDMA */
+       ret = rdma_enable_set(priv, 1);
+       if (ret)
+               goto out_free_rx_ring;
+
+       /* Enable RX interrupt and TX ring full interrupt */
+       intrl2_0_mask_clear(priv, INTRL2_0_RDMA_MBDONE | INTRL2_0_TX_RING_FULL);
+
+       /* Turn on TDMA */
+       ret = tdma_enable_set(priv, 1);
+       if (ret)
+               goto out_clear_rx_int;
+
+       /* Enable NAPI */
+       napi_enable(&priv->napi);
+
+       /* Turn on UniMAC TX/RX */
+       umac_enable_set(priv, 1);
+
+       phy_start(priv->phydev);
+
+       /* Enable TX interrupts for the 32 TXQs */
+       intrl2_1_mask_clear(priv, 0xffffffff);
+
+       /* Last call before we start the real business */
+       netif_tx_start_all_queues(dev);
+
+       return 0;
+
+out_clear_rx_int:
+       intrl2_0_mask_set(priv, INTRL2_0_RDMA_MBDONE | INTRL2_0_TX_RING_FULL);
+out_free_rx_ring:
+       bcm_sysport_fini_rx_ring(priv);
+out_free_tx_ring:
+       /* NOTE(review): this unwinds every TX ring, including rings whose
+        * init failed or never ran; fini on such a ring relies on its
+        * fields being zeroed -- confirm this is safe
+        */
+       for (i = 0; i < dev->num_tx_queues; i++)
+               bcm_sysport_fini_tx_ring(priv, i);
+       free_irq(priv->irq1, dev);
+out_free_irq0:
+       free_irq(priv->irq0, dev);
+out_phy_disconnect:
+       phy_disconnect(priv->phydev);
+       return ret;
+}
+
+/* ndo_stop callback: quiesce software (queues, NAPI, PHY), mask and
+ * clear all interrupts, stop both DMA engines and the UniMAC, then
+ * release the ring resources, the interrupt lines and the PHY.
+ */
+static int bcm_sysport_stop(struct net_device *dev)
+{
+       struct bcm_sysport_priv *priv = netdev_priv(dev);
+       unsigned int i;
+       u32 reg;
+       int ret;
+
+       /* stop all software from updating hardware */
+       netif_tx_stop_all_queues(dev);
+       napi_disable(&priv->napi);
+       phy_stop(priv->phydev);
+
+       /* mask all interrupts */
+       intrl2_0_mask_set(priv, 0xffffffff);
+       intrl2_0_writel(priv, 0xffffffff, INTRL2_CPU_CLEAR);
+       intrl2_1_mask_set(priv, 0xffffffff);
+       intrl2_1_writel(priv, 0xffffffff, INTRL2_CPU_CLEAR);
+
+       /* Disable UniMAC RX */
+       reg = umac_readl(priv, UMAC_CMD);
+       reg &= ~CMD_RX_EN;
+       umac_writel(priv, reg, UMAC_CMD);
+
+       /* Stop the transmit engine first so no new frames get posted */
+       ret = tdma_enable_set(priv, 0);
+       if (ret) {
+               /* Fix: this path stops TDMA, not RDMA -- report the right
+                * engine in the error message
+                */
+               netdev_err(dev, "timeout disabling TDMA\n");
+               return ret;
+       }
+
+       /* Wait for a maximum packet size to be drained */
+       usleep_range(2000, 3000);
+
+       ret = rdma_enable_set(priv, 0);
+       if (ret) {
+               /* Fix: likewise, this is the RDMA engine timing out */
+               netdev_err(dev, "timeout disabling RDMA\n");
+               return ret;
+       }
+
+       /* Disable UniMAC TX */
+       reg = umac_readl(priv, UMAC_CMD);
+       reg &= ~CMD_TX_EN;
+       umac_writel(priv, reg, UMAC_CMD);
+
+       /* Free RX/TX rings SW structures */
+       for (i = 0; i < dev->num_tx_queues; i++)
+               bcm_sysport_fini_tx_ring(priv, i);
+       bcm_sysport_fini_rx_ring(priv);
+
+       free_irq(priv->irq0, dev);
+       free_irq(priv->irq1, dev);
+
+       /* Disconnect from PHY */
+       phy_disconnect(priv->phydev);
+
+       return 0;
+}
+
+/* ethtool operations; const because the table is never modified and
+ * dev->ethtool_ops is a pointer-to-const, so it belongs in read-only data
+ */
+static const struct ethtool_ops bcm_sysport_ethtool_ops = {
+       .get_settings           = bcm_sysport_get_settings,
+       .set_settings           = bcm_sysport_set_settings,
+       .get_drvinfo            = bcm_sysport_get_drvinfo,
+       .get_msglevel           = bcm_sysport_get_msglvl,
+       .set_msglevel           = bcm_sysport_set_msglvl,
+       .get_link               = ethtool_op_get_link,
+       .get_strings            = bcm_sysport_get_strings,
+       .get_ethtool_stats      = bcm_sysport_get_stats,
+       .get_sset_count         = bcm_sysport_get_sset_count,
+};
+
+/* net_device operations implemented by this driver */
+static const struct net_device_ops bcm_sysport_netdev_ops = {
+       .ndo_start_xmit         = bcm_sysport_xmit,
+       .ndo_tx_timeout         = bcm_sysport_tx_timeout,
+       .ndo_open               = bcm_sysport_open,
+       .ndo_stop               = bcm_sysport_stop,
+       .ndo_set_features       = bcm_sysport_set_features,
+       .ndo_set_rx_mode        = bcm_sysport_set_rx_mode,
+};
+
+#define REV_FMT        "v%2x.%02x"
+
+/* Platform probe: read the DT queue counts, allocate the multi-queue
+ * net_device, map registers, resolve the PHY (fixed-link aware), set up
+ * netdev members and register the interface.
+ */
+static int bcm_sysport_probe(struct platform_device *pdev)
+{
+       struct bcm_sysport_priv *priv;
+       struct device_node *dn;
+       struct net_device *dev;
+       const void *macaddr;
+       struct resource *r;
+       u32 txq, rxq;
+       int ret;
+
+       dn = pdev->dev.of_node;
+       r = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+
+       /* Read the Transmit/Receive Queue properties */
+       if (of_property_read_u32(dn, "systemport,num-txq", &txq))
+               txq = TDMA_NUM_RINGS;
+       if (of_property_read_u32(dn, "systemport,num-rxq", &rxq))
+               rxq = 1;
+
+       dev = alloc_etherdev_mqs(sizeof(*priv), txq, rxq);
+       if (!dev)
+               return -ENOMEM;
+
+       /* Initialize private members */
+       priv = netdev_priv(dev);
+
+       priv->irq0 = platform_get_irq(pdev, 0);
+       priv->irq1 = platform_get_irq(pdev, 1);
+       if (priv->irq0 <= 0 || priv->irq1 <= 0) {
+               dev_err(&pdev->dev, "invalid interrupts\n");
+               ret = -EINVAL;
+               goto err;
+       }
+
+       priv->base = devm_ioremap_resource(&pdev->dev, r);
+       if (IS_ERR(priv->base)) {
+               ret = PTR_ERR(priv->base);
+               goto err;
+       }
+
+       priv->netdev = dev;
+       priv->pdev = pdev;
+
+       priv->phy_interface = of_get_phy_mode(dn);
+       /* Default to GMII interface mode */
+       /* NOTE(review): if phy_interface is an unsigned enum type this
+        * negative-value check can never trigger -- confirm the field type
+        */
+       if (priv->phy_interface < 0)
+               priv->phy_interface = PHY_INTERFACE_MODE_GMII;
+
+       /* In the case of a fixed PHY, the DT node associated
+        * to the PHY is the Ethernet MAC DT node.
+        */
+       if (of_phy_is_fixed_link(dn)) {
+               ret = of_phy_register_fixed_link(dn);
+               if (ret) {
+                       dev_err(&pdev->dev, "failed to register fixed PHY\n");
+                       goto err;
+               }
+
+               /* NOTE(review): the fixed link is not deregistered on later
+                * failure paths -- confirm whether that leaks
+                */
+               priv->phy_dn = dn;
+       }
+
+       /* Initialize netdevice members */
+       macaddr = of_get_mac_address(dn);
+       if (!macaddr || !is_valid_ether_addr(macaddr)) {
+               dev_warn(&pdev->dev, "using random Ethernet MAC\n");
+               random_ether_addr(dev->dev_addr);
+       } else {
+               ether_addr_copy(dev->dev_addr, macaddr);
+       }
+
+       SET_NETDEV_DEV(dev, &pdev->dev);
+       dev_set_drvdata(&pdev->dev, dev);
+       dev->ethtool_ops = &bcm_sysport_ethtool_ops;
+       dev->netdev_ops = &bcm_sysport_netdev_ops;
+       netif_napi_add(dev, &priv->napi, bcm_sysport_poll, 64);
+
+       /* HW supported features, none enabled by default */
+       dev->hw_features |= NETIF_F_RXCSUM | NETIF_F_HIGHDMA |
+                               NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM;
+
+       /* Set the needed headroom once and for all */
+       BUILD_BUG_ON(sizeof(struct tsb) != 8);
+       dev->needed_headroom += sizeof(struct tsb);
+
+       /* We are interfaced to a switch which handles the multicast
+        * filtering for us, so we do not support programming any
+        * multicast hash table in this Ethernet MAC.
+        */
+       dev->flags &= ~IFF_MULTICAST;
+
+       ret = register_netdev(dev);
+       if (ret) {
+               dev_err(&pdev->dev, "failed to register net_device\n");
+               goto err;
+       }
+
+       priv->rev = topctrl_readl(priv, REV_CNTL) & REV_MASK;
+       dev_info(&pdev->dev,
+               "Broadcom SYSTEMPORT" REV_FMT
+               " at 0x%p (irqs: %d, %d, TXQs: %d, RXQs: %d)\n",
+               (priv->rev >> 8) & 0xff, priv->rev & 0xff,
+               priv->base, priv->irq0, priv->irq1, txq, rxq);
+
+       return 0;
+err:
+       free_netdev(dev);
+       return ret;
+}
+
+/* Platform remove: the interface has already been brought down via
+ * ndo_stop and the register mapping is devm-managed, so only the netdev
+ * bookkeeping remains to be undone here.
+ */
+static int bcm_sysport_remove(struct platform_device *pdev)
+{
+       struct net_device *dev = dev_get_drvdata(&pdev->dev);
+
+       unregister_netdev(dev);
+       free_netdev(dev);
+       dev_set_drvdata(&pdev->dev, NULL);
+
+       return 0;
+}
+
+/* Devicetree match table; exported so the module can be autoloaded when
+ * a matching compatible node is probed
+ */
+static const struct of_device_id bcm_sysport_of_match[] = {
+       { .compatible = "brcm,systemport-v1.00" },
+       { .compatible = "brcm,systemport" },
+       { /* sentinel */ }
+};
+MODULE_DEVICE_TABLE(of, bcm_sysport_of_match);
+
+/* Platform driver glue: matched by OF compatible string (see
+ * bcm_sysport_of_match) or by the "brcm-systemport" platform name
+ */
+static struct platform_driver bcm_sysport_driver = {
+       .probe  = bcm_sysport_probe,
+       .remove = bcm_sysport_remove,
+       .driver =  {
+               .name = "brcm-systemport",
+               .owner = THIS_MODULE,
+               .of_match_table = bcm_sysport_of_match,
+       },
+};
+module_platform_driver(bcm_sysport_driver);
+
+MODULE_AUTHOR("Broadcom Corporation");
+MODULE_DESCRIPTION("Broadcom System Port Ethernet MAC driver");
+MODULE_ALIAS("platform:brcm-systemport");
+MODULE_LICENSE("GPL");
diff --git a/drivers/net/ethernet/broadcom/bcmsysport.h b/drivers/net/ethernet/broadcom/bcmsysport.h
new file mode 100644 (file)
index 0000000..73fd04a
--- /dev/null
@@ -0,0 +1,678 @@
+/*
+ * Broadcom BCM7xxx System Port Ethernet MAC driver
+ *
+ * Copyright (C) 2014 Broadcom Corporation
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#ifndef __BCM_SYSPORT_H
+#define __BCM_SYSPORT_H
+
+#include <linux/if_vlan.h>
+
+/* Receive/transmit descriptor format */
+#define DESC_ADDR_HI_STATUS_LEN        0x00
+#define  DESC_ADDR_HI_SHIFT    0
+#define  DESC_ADDR_HI_MASK     0xff
+#define  DESC_STATUS_SHIFT     8
+#define  DESC_STATUS_MASK      0x3ff
+#define  DESC_LEN_SHIFT                18
+#define  DESC_LEN_MASK         0x7fff
+#define DESC_ADDR_LO           0x04
+
+/* HW supports 40-bit addressing hence the */
+#define DESC_SIZE              (WORDS_PER_DESC * sizeof(u32))
+
+/* Default RX buffer allocation size */
+#define RX_BUF_LENGTH          2048
+
+/* Body(1500) + EH_SIZE(14) + VLANTAG(4) + BRCMTAG(4) + FCS(4) = 1526.
+ * 1536 is multiple of 256 bytes
+ */
+#define ENET_BRCM_TAG_LEN      4
+#define ENET_PAD               10
+#define UMAC_MAX_MTU_SIZE      (ETH_DATA_LEN + ETH_HLEN + VLAN_HLEN + \
+                                ENET_BRCM_TAG_LEN + ETH_FCS_LEN + ENET_PAD)
+
+/* Transmit status block */
+struct tsb {
+       u32 pcp_dei_vid;
+#define PCP_DEI_MASK           0xf
+#define VID_SHIFT              4
+#define VID_MASK               0xfff
+       u32 l4_ptr_dest_map;
+#define L4_CSUM_PTR_MASK       0x1ff
+#define L4_PTR_SHIFT           9
+#define L4_PTR_MASK            0x1ff
+#define L4_UDP                 (1 << 18)
+#define L4_LENGTH_VALID                (1 << 19)
+#define DEST_MAP_SHIFT         20
+#define DEST_MAP_MASK          0x1ff
+};
+
+/* Receive status block uses the same
+ * definitions as the DMA descriptor
+ */
+struct rsb {
+       u32 rx_status_len;
+       u32 brcm_egress_tag;
+};
+
+/* Common Receive/Transmit status bits */
+#define DESC_L4_CSUM           (1 << 7)
+#define DESC_SOP               (1 << 8)
+#define DESC_EOP               (1 << 9)
+
+/* Receive Status bits */
+#define RX_STATUS_UCAST                        0
+#define RX_STATUS_BCAST                        0x04
+#define RX_STATUS_MCAST                        0x08
+#define RX_STATUS_L2_MCAST             0x0c
+#define RX_STATUS_ERR                  (1 << 4)
+#define RX_STATUS_OVFLOW               (1 << 5)
+#define RX_STATUS_PARSE_FAIL           (1 << 6)
+
+/* Transmit Status bits */
+#define TX_STATUS_VLAN_NO_ACT          0x00
+#define TX_STATUS_VLAN_PCP_TSB         0x01
+#define TX_STATUS_VLAN_QUEUE           0x02
+#define TX_STATUS_VLAN_VID_TSB         0x03
+#define TX_STATUS_OWR_CRC              (1 << 2)
+#define TX_STATUS_APP_CRC              (1 << 3)
+#define TX_STATUS_BRCM_TAG_NO_ACT      0
+#define TX_STATUS_BRCM_TAG_ZERO                0x10
+#define TX_STATUS_BRCM_TAG_ONE_QUEUE   0x20
+#define TX_STATUS_BRCM_TAG_ONE_TSB     0x30
+#define TX_STATUS_SKIP_BYTES           (1 << 6)
+
+/* Specific register definitions */
+#define SYS_PORT_TOPCTRL_OFFSET                0
+#define REV_CNTL                       0x00
+#define  REV_MASK                      0xffff
+
+#define RX_FLUSH_CNTL                  0x04
+#define  RX_FLUSH                      (1 << 0)
+
+#define TX_FLUSH_CNTL                  0x08
+#define  TX_FLUSH                      (1 << 0)
+
+#define MISC_CNTL                      0x0c
+#define  SYS_CLK_SEL                   (1 << 0)
+#define  TDMA_EOP_SEL                  (1 << 1)
+
+/* Level-2 Interrupt controller offsets and defines */
+#define SYS_PORT_INTRL2_0_OFFSET       0x200
+#define SYS_PORT_INTRL2_1_OFFSET       0x240
+#define INTRL2_CPU_STATUS              0x00
+#define INTRL2_CPU_SET                 0x04
+#define INTRL2_CPU_CLEAR               0x08
+#define INTRL2_CPU_MASK_STATUS         0x0c
+#define INTRL2_CPU_MASK_SET            0x10
+#define INTRL2_CPU_MASK_CLEAR          0x14
+
+/* Level-2 instance 0 interrupt bits */
+#define INTRL2_0_GISB_ERR              (1 << 0)
+#define INTRL2_0_RBUF_OVFLOW           (1 << 1)
+#define INTRL2_0_TBUF_UNDFLOW          (1 << 2)
+#define INTRL2_0_MPD                   (1 << 3)
+#define INTRL2_0_BRCM_MATCH_TAG                (1 << 4)
+#define INTRL2_0_RDMA_MBDONE           (1 << 5)
+#define INTRL2_0_OVER_MAX_THRESH       (1 << 6)
+#define INTRL2_0_BELOW_HYST_THRESH     (1 << 7)
+#define INTRL2_0_FREE_LIST_EMPTY       (1 << 8)
+#define INTRL2_0_TX_RING_FULL          (1 << 9)
+#define INTRL2_0_DESC_ALLOC_ERR                (1 << 10)
+#define INTRL2_0_UNEXP_PKTSIZE_ACK     (1 << 11)
+
+/* RXCHK offset and defines */
+#define SYS_PORT_RXCHK_OFFSET          0x300
+
+#define RXCHK_CONTROL                  0x00
+#define  RXCHK_EN                      (1 << 0)
+#define  RXCHK_SKIP_FCS                        (1 << 1)
+#define  RXCHK_BAD_CSUM_DIS            (1 << 2)
+#define  RXCHK_BRCM_TAG_EN             (1 << 3)
+#define  RXCHK_BRCM_TAG_MATCH_SHIFT    4
+#define  RXCHK_BRCM_TAG_MATCH_MASK     0xff
+#define  RXCHK_PARSE_TNL               (1 << 12)
+#define  RXCHK_VIOL_EN                 (1 << 13)
+#define  RXCHK_VIOL_DIS                        (1 << 14)
+#define  RXCHK_INCOM_PKT               (1 << 15)
+#define  RXCHK_V6_DUPEXT_EN            (1 << 16)
+#define  RXCHK_V6_DUPEXT_DIS           (1 << 17)
+#define  RXCHK_ETHERTYPE_DIS           (1 << 18)
+#define  RXCHK_L2_HDR_DIS              (1 << 19)
+#define  RXCHK_L3_HDR_DIS              (1 << 20)
+#define  RXCHK_MAC_RX_ERR_DIS          (1 << 21)
+#define  RXCHK_PARSE_AUTH              (1 << 22)
+
+#define RXCHK_BRCM_TAG0                        0x04
+#define RXCHK_BRCM_TAG(i)              ((i) * RXCHK_BRCM_TAG0)
+#define RXCHK_BRCM_TAG0_MASK           0x24
+#define RXCHK_BRCM_TAG_MASK(i)         ((i) * RXCHK_BRCM_TAG0_MASK)
+#define RXCHK_BRCM_TAG_MATCH_STATUS    0x44
+#define RXCHK_ETHERTYPE                        0x48
+#define RXCHK_BAD_CSUM_CNTR            0x4C
+#define RXCHK_OTHER_DISC_CNTR          0x50
+
+/* TXCHCK offsets and defines */
+#define SYS_PORT_TXCHK_OFFSET          0x380
+#define TXCHK_PKT_RDY_THRESH           0x00
+
+/* Receive buffer offset and defines */
+#define SYS_PORT_RBUF_OFFSET           0x400
+
+#define RBUF_CONTROL                   0x00
+#define  RBUF_RSB_EN                   (1 << 0)
+#define  RBUF_4B_ALGN                  (1 << 1)
+#define  RBUF_BRCM_TAG_STRIP           (1 << 2)
+#define  RBUF_BAD_PKT_DISC             (1 << 3)
+#define  RBUF_RESUME_THRESH_SHIFT      4
+#define  RBUF_RESUME_THRESH_MASK       0xff
+#define  RBUF_OK_TO_SEND_SHIFT         12
+#define  RBUF_OK_TO_SEND_MASK          0xff
+#define  RBUF_CRC_REPLACE              (1 << 20)
+#define  RBUF_OK_TO_SEND_MODE          (1 << 21)
+#define  RBUF_RSB_SWAP                 (1 << 22)
+#define  RBUF_ACPI_EN                  (1 << 23)
+
+#define RBUF_PKT_RDY_THRESH            0x04
+
+#define RBUF_STATUS                    0x08
+#define  RBUF_WOL_MODE                 (1 << 0)
+#define  RBUF_MPD                      (1 << 1)
+#define  RBUF_ACPI                     (1 << 2)
+
+#define RBUF_OVFL_DISC_CNTR            0x0c
+#define RBUF_ERR_PKT_CNTR              0x10
+
+/* Transmit buffer offset and defines */
+#define SYS_PORT_TBUF_OFFSET           0x600
+
+#define TBUF_CONTROL                   0x00
+#define  TBUF_BP_EN                    (1 << 0)
+#define  TBUF_MAX_PKT_THRESH_SHIFT     1
+#define  TBUF_MAX_PKT_THRESH_MASK      0x1f
+#define  TBUF_FULL_THRESH_SHIFT                8
+#define  TBUF_FULL_THRESH_MASK         0x1f
+
+/* UniMAC offset and defines */
+#define SYS_PORT_UMAC_OFFSET           0x800
+
+#define UMAC_CMD                       0x008
+#define  CMD_TX_EN                     (1 << 0)
+#define  CMD_RX_EN                     (1 << 1)
+#define  CMD_SPEED_SHIFT               2
+#define  CMD_SPEED_10                  0
+#define  CMD_SPEED_100                 1
+#define  CMD_SPEED_1000                        2
+#define  CMD_SPEED_2500                        3
+#define  CMD_SPEED_MASK                        3
+#define  CMD_PROMISC                   (1 << 4)
+#define  CMD_PAD_EN                    (1 << 5)
+#define  CMD_CRC_FWD                   (1 << 6)
+#define  CMD_PAUSE_FWD                 (1 << 7)
+#define  CMD_RX_PAUSE_IGNORE           (1 << 8)
+#define  CMD_TX_ADDR_INS               (1 << 9)
+#define  CMD_HD_EN                     (1 << 10)
+#define  CMD_SW_RESET                  (1 << 13)
+#define  CMD_LCL_LOOP_EN               (1 << 15)
+#define  CMD_AUTO_CONFIG               (1 << 22)
+#define  CMD_CNTL_FRM_EN               (1 << 23)
+#define  CMD_NO_LEN_CHK                        (1 << 24)
+#define  CMD_RMT_LOOP_EN               (1 << 25)
+#define  CMD_PRBL_EN                   (1 << 27)
+#define  CMD_TX_PAUSE_IGNORE           (1 << 28)
+#define  CMD_TX_RX_EN                  (1 << 29)
+#define  CMD_RUNT_FILTER_DIS           (1 << 30)
+
+#define UMAC_MAC0                      0x00c
+#define UMAC_MAC1                      0x010
+#define UMAC_MAX_FRAME_LEN             0x014
+
+#define UMAC_TX_FLUSH                  0x334
+
+#define UMAC_MIB_START                 0x400
+
+/* There is a 0xC gap between the end of RX and beginning of TX stats and then
+ * between the end of TX stats and the beginning of the RX RUNT
+ */
+#define UMAC_MIB_STAT_OFFSET           0xc
+
+#define UMAC_MIB_CTRL                  0x580
+#define  MIB_RX_CNT_RST                        (1 << 0)
+#define  MIB_RUNT_CNT_RST              (1 << 1)
+#define  MIB_TX_CNT_RST                        (1 << 2)
+#define UMAC_MDF_CTRL                  0x650
+#define UMAC_MDF_ADDR                  0x654
+
+/* Receive DMA offset and defines */
+#define SYS_PORT_RDMA_OFFSET           0x2000
+
+#define RDMA_CONTROL                   0x1000
+#define  RDMA_EN                       (1 << 0)
+#define  RDMA_RING_CFG                 (1 << 1)
+#define  RDMA_DISC_EN                  (1 << 2)
+#define  RDMA_BUF_DATA_OFFSET_SHIFT    4
+#define  RDMA_BUF_DATA_OFFSET_MASK     0x3ff
+
+#define RDMA_STATUS                    0x1004
+#define  RDMA_DISABLED                 (1 << 0)
+#define  RDMA_DESC_RAM_INIT_BUSY       (1 << 1)
+#define  RDMA_BP_STATUS                        (1 << 2)
+
+#define RDMA_SCB_BURST_SIZE            0x1008
+
+#define RDMA_RING_BUF_SIZE             0x100c
+#define  RDMA_RING_SIZE_SHIFT          16
+
+#define RDMA_WRITE_PTR_HI              0x1010
+#define RDMA_WRITE_PTR_LO              0x1014
+#define RDMA_PROD_INDEX                        0x1018
+#define  RDMA_PROD_INDEX_MASK          0xffff
+
+#define RDMA_CONS_INDEX                        0x101c
+#define  RDMA_CONS_INDEX_MASK          0xffff
+
+#define RDMA_START_ADDR_HI             0x1020
+#define RDMA_START_ADDR_LO             0x1024
+#define RDMA_END_ADDR_HI               0x1028
+#define RDMA_END_ADDR_LO               0x102c
+
+#define RDMA_MBDONE_INTR               0x1030
+#define  RDMA_INTR_THRESH_MASK         0xff
+#define  RDMA_TIMEOUT_SHIFT            16
+#define  RDMA_TIMEOUT_MASK             0xffff
+
+#define RDMA_XON_XOFF_THRESH           0x1034
+#define  RDMA_XON_XOFF_THRESH_MASK     0xffff
+#define  RDMA_XOFF_THRESH_SHIFT                16
+
+#define RDMA_READ_PTR_HI               0x1038
+#define RDMA_READ_PTR_LO               0x103c
+
+#define RDMA_OVERRIDE                  0x1040
+#define  RDMA_LE_MODE                  (1 << 0)
+#define  RDMA_REG_MODE                 (1 << 1)
+
+#define RDMA_TEST                      0x1044
+#define  RDMA_TP_OUT_SEL               (1 << 0)
+#define  RDMA_MEM_SEL                  (1 << 1)
+
+#define RDMA_DEBUG                     0x1048
+
+/* Transmit DMA offset and defines */
+#define TDMA_NUM_RINGS                 32      /* rings = queues */
+#define TDMA_PORT_SIZE                 DESC_SIZE /* two 32-bits words */
+
+#define SYS_PORT_TDMA_OFFSET           0x4000
+#define TDMA_WRITE_PORT_OFFSET         0x0000
+#define TDMA_WRITE_PORT_HI(i)          (TDMA_WRITE_PORT_OFFSET + \
+                                       (i) * TDMA_PORT_SIZE)
+#define TDMA_WRITE_PORT_LO(i)          (TDMA_WRITE_PORT_OFFSET + \
+                                       sizeof(u32) + (i) * TDMA_PORT_SIZE)
+
+#define TDMA_READ_PORT_OFFSET          (TDMA_WRITE_PORT_OFFSET + \
+                                       (TDMA_NUM_RINGS * TDMA_PORT_SIZE))
+#define TDMA_READ_PORT_HI(i)           (TDMA_READ_PORT_OFFSET + \
+                                       (i) * TDMA_PORT_SIZE)
+#define TDMA_READ_PORT_LO(i)           (TDMA_READ_PORT_OFFSET + \
+                                       sizeof(u32) + (i) * TDMA_PORT_SIZE)
+
+#define TDMA_READ_PORT_CMD_OFFSET      (TDMA_READ_PORT_OFFSET + \
+                                       (TDMA_NUM_RINGS * TDMA_PORT_SIZE))
+#define TDMA_READ_PORT_CMD(i)          (TDMA_READ_PORT_CMD_OFFSET + \
+                                       (i) * sizeof(u32))
+
+#define TDMA_DESC_RING_00_BASE         (TDMA_READ_PORT_CMD_OFFSET + \
+                                       (TDMA_NUM_RINGS * sizeof(u32)))
+
+/* Register offsets and defines relatives to a specific ring number */
+#define RING_HEAD_TAIL_PTR             0x00
+#define  RING_HEAD_MASK                        0x7ff
+#define  RING_TAIL_SHIFT               11
+#define  RING_TAIL_MASK                        0x7ff
+#define  RING_FLUSH                    (1 << 24)
+#define  RING_EN                       (1 << 25)
+
+#define RING_COUNT                     0x04
+#define  RING_COUNT_MASK               0x7ff
+#define  RING_BUFF_DONE_SHIFT          11
+#define  RING_BUFF_DONE_MASK           0x7ff
+
+#define RING_MAX_HYST                  0x08
+#define  RING_MAX_THRESH_MASK          0x7ff
+#define  RING_HYST_THRESH_SHIFT                11
+#define  RING_HYST_THRESH_MASK         0x7ff
+
+#define RING_INTR_CONTROL              0x0c
+#define  RING_INTR_THRESH_MASK         0x7ff
+#define  RING_EMPTY_INTR_EN            (1 << 15)
+#define  RING_TIMEOUT_SHIFT            16
+#define  RING_TIMEOUT_MASK             0xffff
+
+#define RING_PROD_CONS_INDEX           0x10
+#define  RING_PROD_INDEX_MASK          0xffff
+#define  RING_CONS_INDEX_SHIFT         16
+#define  RING_CONS_INDEX_MASK          0xffff
+
+#define RING_MAPPING                   0x14
+#define  RING_QID_MASK                 0x3
+#define  RING_PORT_ID_SHIFT            3
+#define  RING_PORT_ID_MASK             0x7
+#define  RING_IGNORE_STATUS            (1 << 6)
+#define  RING_FAILOVER_EN              (1 << 7)
+#define  RING_CREDIT_SHIFT             8
+#define  RING_CREDIT_MASK              0xffff
+
+#define RING_PCP_DEI_VID               0x18
+#define  RING_VID_MASK                 0x7ff
+#define  RING_DEI                      (1 << 12)
+#define  RING_PCP_SHIFT                        13
+#define  RING_PCP_MASK                 0x7
+#define  RING_PKT_SIZE_ADJ_SHIFT       16
+#define  RING_PKT_SIZE_ADJ_MASK                0xf
+
+#define TDMA_DESC_RING_SIZE            28
+
+/* Defininition for a given TX ring base address */
+#define TDMA_DESC_RING_BASE(i)         (TDMA_DESC_RING_00_BASE + \
+                                       ((i) * TDMA_DESC_RING_SIZE))
+
+/* Ring indexed register addreses */
+#define TDMA_DESC_RING_HEAD_TAIL_PTR(i)        (TDMA_DESC_RING_BASE(i) + \
+                                       RING_HEAD_TAIL_PTR)
+#define TDMA_DESC_RING_COUNT(i)                (TDMA_DESC_RING_BASE(i) + \
+                                       RING_COUNT)
+#define TDMA_DESC_RING_MAX_HYST(i)     (TDMA_DESC_RING_BASE(i) + \
+                                       RING_MAX_HYST)
+#define TDMA_DESC_RING_INTR_CONTROL(i) (TDMA_DESC_RING_BASE(i) + \
+                                       RING_INTR_CONTROL)
+#define TDMA_DESC_RING_PROD_CONS_INDEX(i) \
+                                       (TDMA_DESC_RING_BASE(i) + \
+                                       RING_PROD_CONS_INDEX)
+#define TDMA_DESC_RING_MAPPING(i)      (TDMA_DESC_RING_BASE(i) + \
+                                       RING_MAPPING)
+#define TDMA_DESC_RING_PCP_DEI_VID(i)  (TDMA_DESC_RING_BASE(i) + \
+                                       RING_PCP_DEI_VID)
+
+#define TDMA_CONTROL                   0x600
+#define  TDMA_EN                       (1 << 0)
+#define  TSB_EN                                (1 << 1)
+#define  TSB_SWAP                      (1 << 2)
+#define  ACB_ALGO                      (1 << 3)
+#define  BUF_DATA_OFFSET_SHIFT         4
+#define  BUF_DATA_OFFSET_MASK          0x3ff
+#define  VLAN_EN                       (1 << 14)
+#define  SW_BRCM_TAG                   (1 << 15)
+#define  WNC_KPT_SIZE_UPDATE           (1 << 16)
+#define  SYNC_PKT_SIZE                 (1 << 17)
+#define  ACH_TXDONE_DELAY_SHIFT                18
+#define  ACH_TXDONE_DELAY_MASK         0xff
+
+#define TDMA_STATUS                    0x604
+#define  TDMA_DISABLED                 (1 << 0)
+#define  TDMA_LL_RAM_INIT_BUSY         (1 << 1)
+
+#define TDMA_SCB_BURST_SIZE            0x608
+#define TDMA_OVER_MAX_THRESH_STATUS    0x60c
+#define TDMA_OVER_HYST_THRESH_STATUS   0x610
+#define TDMA_TPID                      0x614
+
+#define TDMA_FREE_LIST_HEAD_TAIL_PTR   0x618
+#define  TDMA_FREE_HEAD_MASK           0x7ff
+#define  TDMA_FREE_TAIL_SHIFT          11
+#define  TDMA_FREE_TAIL_MASK           0x7ff
+
+#define TDMA_FREE_LIST_COUNT           0x61c
+#define  TDMA_FREE_LIST_COUNT_MASK     0x7ff
+
+#define TDMA_TIER2_ARB_CTRL            0x620
+#define  TDMA_ARB_MODE_RR              0
+#define  TDMA_ARB_MODE_WEIGHT_RR       0x1
+#define  TDMA_ARB_MODE_STRICT          0x2
+#define  TDMA_ARB_MODE_DEFICIT_RR      0x3
+#define  TDMA_CREDIT_SHIFT             4
+#define  TDMA_CREDIT_MASK              0xffff
+
+#define TDMA_TIER1_ARB_0_CTRL          0x624
+#define  TDMA_ARB_EN                   (1 << 0)
+
+#define TDMA_TIER1_ARB_0_QUEUE_EN      0x628
+#define TDMA_TIER1_ARB_1_CTRL          0x62c
+#define TDMA_TIER1_ARB_1_QUEUE_EN      0x630
+#define TDMA_TIER1_ARB_2_CTRL          0x634
+#define TDMA_TIER1_ARB_2_QUEUE_EN      0x638
+#define TDMA_TIER1_ARB_3_CTRL          0x63c
+#define TDMA_TIER1_ARB_3_QUEUE_EN      0x640
+
+#define TDMA_SCB_ENDIAN_OVERRIDE       0x644
+#define  TDMA_LE_MODE                  (1 << 0)
+#define  TDMA_REG_MODE                 (1 << 1)
+
+#define TDMA_TEST                      0x648
+#define  TDMA_TP_OUT_SEL               (1 << 0)
+#define  TDMA_MEM_TM                   (1 << 1)
+
+#define TDMA_DEBUG                     0x64c
+
+/* Transmit/Receive descriptor */
+struct dma_desc {
+       u32     addr_status_len;
+       u32     addr_lo;
+};
+
+/* Number of Receive hardware descriptor words */
+#define NUM_HW_RX_DESC_WORDS           1024
+/* Real number of usable descriptors */
+#define NUM_RX_DESC                    (NUM_HW_RX_DESC_WORDS / WORDS_PER_DESC)
+
+/* Internal linked-list RAM has up to 1536 entries */
+#define NUM_TX_DESC                    1536
+
+#define WORDS_PER_DESC                 (sizeof(struct dma_desc) / sizeof(u32))
+
+/* Rx/Tx common counter group.*/
+struct bcm_sysport_pkt_counters {
+       u32     cnt_64;         /* RO Received/Transmited 64 bytes packet */
+       u32     cnt_127;        /* RO Rx/Tx 127 bytes packet */
+       u32     cnt_255;        /* RO Rx/Tx 65-255 bytes packet */
+       u32     cnt_511;        /* RO Rx/Tx 256-511 bytes packet */
+       u32     cnt_1023;       /* RO Rx/Tx 512-1023 bytes packet */
+       u32     cnt_1518;       /* RO Rx/Tx 1024-1518 bytes packet */
+       u32     cnt_mgv;        /* RO Rx/Tx 1519-1522 good VLAN packet */
+       u32     cnt_2047;       /* RO Rx/Tx 1522-2047 bytes packet*/
+       u32     cnt_4095;       /* RO Rx/Tx 2048-4095 bytes packet*/
+       u32     cnt_9216;       /* RO Rx/Tx 4096-9216 bytes packet*/
+};
+
+/* RSV, Receive Status Vector */
+struct bcm_sysport_rx_counters {
+       struct  bcm_sysport_pkt_counters pkt_cnt;
+       u32     pkt;            /* RO (0x428) Received pkt count*/
+       u32     bytes;          /* RO Received byte count */
+       u32     mca;            /* RO # of Received multicast pkt */
+       u32     bca;            /* RO # of Receive broadcast pkt */
+       u32     fcs;            /* RO # of Received FCS error  */
+       u32     cf;             /* RO # of Received control frame pkt*/
+       u32     pf;             /* RO # of Received pause frame pkt */
+       u32     uo;             /* RO # of unknown op code pkt */
+       u32     aln;            /* RO # of alignment error count */
+       u32     flr;            /* RO # of frame length out of range count */
+       u32     cde;            /* RO # of code error pkt */
+       u32     fcr;            /* RO # of carrier sense error pkt */
+       u32     ovr;            /* RO # of oversize pkt*/
+       u32     jbr;            /* RO # of jabber count */
+       u32     mtue;           /* RO # of MTU error pkt*/
+       u32     pok;            /* RO # of Received good pkt */
+       u32     uc;             /* RO # of unicast pkt */
+       u32     ppp;            /* RO # of PPP pkt */
+       u32     rcrc;           /* RO (0x470),# of CRC match pkt */
+};
+
+/* TSV, Transmit Status Vector */
+struct bcm_sysport_tx_counters {
+       struct bcm_sysport_pkt_counters pkt_cnt;
+       u32     pkts;           /* RO (0x4a8) Transmited pkt */
+       u32     mca;            /* RO # of xmited multicast pkt */
+       u32     bca;            /* RO # of xmited broadcast pkt */
+       u32     pf;             /* RO # of xmited pause frame count */
+       u32     cf;             /* RO # of xmited control frame count */
+       u32     fcs;            /* RO # of xmited FCS error count */
+       u32     ovr;            /* RO # of xmited oversize pkt */
+       u32     drf;            /* RO # of xmited deferral pkt */
+       u32     edf;            /* RO # of xmited Excessive deferral pkt*/
+       u32     scl;            /* RO # of xmited single collision pkt */
+       u32     mcl;            /* RO # of xmited multiple collision pkt*/
+       u32     lcl;            /* RO # of xmited late collision pkt */
+       u32     ecl;            /* RO # of xmited excessive collision pkt*/
+       u32     frg;            /* RO # of xmited fragments pkt*/
+       u32     ncl;            /* RO # of xmited total collision count */
+       u32     jbr;            /* RO # of xmited jabber count*/
+       u32     bytes;          /* RO # of xmited byte count */
+       u32     pok;            /* RO # of xmited good pkt */
+       u32     uc;             /* RO (0x0x4f0)# of xmited unitcast pkt */
+};
+
+struct bcm_sysport_mib {
+       struct bcm_sysport_rx_counters rx;
+       struct bcm_sysport_tx_counters tx;
+       u32 rx_runt_cnt;
+       u32 rx_runt_fcs;
+       u32 rx_runt_fcs_align;
+       u32 rx_runt_bytes;
+       u32 rxchk_bad_csum;
+       u32 rxchk_other_pkt_disc;
+       u32 rbuf_ovflow_cnt;
+       u32 rbuf_err_cnt;
+};
+
+/* HW maintains a large list of counters */
+enum bcm_sysport_stat_type {
+       BCM_SYSPORT_STAT_NETDEV = -1,
+       BCM_SYSPORT_STAT_MIB_RX,
+       BCM_SYSPORT_STAT_MIB_TX,
+       BCM_SYSPORT_STAT_RUNT,
+       BCM_SYSPORT_STAT_RXCHK,
+       BCM_SYSPORT_STAT_RBUF,
+};
+
+/* Macros to help define ethtool statistics */
+#define STAT_NETDEV(m) { \
+       .stat_string = __stringify(m), \
+       .stat_sizeof = sizeof(((struct net_device_stats *)0)->m), \
+       .stat_offset = offsetof(struct net_device_stats, m), \
+       .type = BCM_SYSPORT_STAT_NETDEV, \
+}
+
+#define STAT_MIB(str, m, _type) { \
+       .stat_string = str, \
+       .stat_sizeof = sizeof(((struct bcm_sysport_priv *)0)->m), \
+       .stat_offset = offsetof(struct bcm_sysport_priv, m), \
+       .type = _type, \
+}
+
+#define STAT_MIB_RX(str, m) STAT_MIB(str, m, BCM_SYSPORT_STAT_MIB_RX)
+#define STAT_MIB_TX(str, m) STAT_MIB(str, m, BCM_SYSPORT_STAT_MIB_TX)
+#define STAT_RUNT(str, m) STAT_MIB(str, m, BCM_SYSPORT_STAT_RUNT)
+
+#define STAT_RXCHK(str, m, ofs) { \
+       .stat_string = str, \
+       .stat_sizeof = sizeof(((struct bcm_sysport_priv *)0)->m), \
+       .stat_offset = offsetof(struct bcm_sysport_priv, m), \
+       .type = BCM_SYSPORT_STAT_RXCHK, \
+       .reg_offset = ofs, \
+}
+
+#define STAT_RBUF(str, m, ofs) { \
+       .stat_string = str, \
+       .stat_sizeof = sizeof(((struct bcm_sysport_priv *)0)->m), \
+       .stat_offset = offsetof(struct bcm_sysport_priv, m), \
+       .type = BCM_SYSPORT_STAT_RBUF, \
+       .reg_offset = ofs, \
+}
+
+struct bcm_sysport_stats {
+       char stat_string[ETH_GSTRING_LEN];
+       int stat_sizeof;
+       int stat_offset;
+       enum bcm_sysport_stat_type type;
+       /* reg offset from UMAC base for misc counters */
+       u16 reg_offset;
+};
+
+/* Software house keeping helper structure */
+struct bcm_sysport_cb {
+       struct sk_buff  *skb;           /* SKB for RX packets */
+       void __iomem    *bd_addr;       /* Buffer descriptor PHYS addr */
+
+       DEFINE_DMA_UNMAP_ADDR(dma_addr);
+       DEFINE_DMA_UNMAP_LEN(dma_len);
+};
+
+/* Software view of the TX ring */
+struct bcm_sysport_tx_ring {
+       spinlock_t      lock;           /* Ring lock for tx reclaim/xmit */
+       struct napi_struct napi;        /* NAPI per tx queue */
+       dma_addr_t      desc_dma;       /* DMA cookie */
+       unsigned int    index;          /* Ring index */
+       unsigned int    size;           /* Ring current size */
+       unsigned int    alloc_size;     /* Ring one-time allocated size */
+       unsigned int    desc_count;     /* Number of descriptors */
+       unsigned int    curr_desc;      /* Current descriptor */
+       unsigned int    c_index;        /* Last consumer index */
+       unsigned int    p_index;        /* Current producer index */
+       struct bcm_sysport_cb *cbs;     /* Transmit control blocks */
+       struct dma_desc *desc_cpu;      /* CPU view of the descriptor */
+       struct bcm_sysport_priv *priv;  /* private context backpointer */
+};
+
+/* Driver private structure */
+struct bcm_sysport_priv {
+       void __iomem            *base;
+       u32                     irq0_stat;
+       u32                     irq0_mask;
+       u32                     irq1_stat;
+       u32                     irq1_mask;
+       struct napi_struct      napi ____cacheline_aligned;
+       struct net_device       *netdev;
+       struct platform_device  *pdev;
+       int                     irq0;
+       int                     irq1;
+
+       /* Transmit rings */
+       struct bcm_sysport_tx_ring tx_rings[TDMA_NUM_RINGS];
+
+       /* Receive queue */
+       void __iomem            *rx_bds;
+       void __iomem            *rx_bd_assign_ptr;
+       unsigned int            rx_bd_assign_index;
+       struct bcm_sysport_cb   *rx_cbs;
+       unsigned int            num_rx_bds;
+       unsigned int            rx_read_ptr;
+       unsigned int            rx_c_index;
+
+       /* PHY device */
+       struct device_node      *phy_dn;
+       struct phy_device       *phydev;
+       phy_interface_t         phy_interface;
+       int                     old_pause;
+       int                     old_link;
+       int                     old_duplex;
+
+       /* Misc fields */
+       unsigned int            rx_csum_en:1;
+       unsigned int            tsb_en:1;
+       unsigned int            crc_fwd:1;
+       u16                     rev;
+
+       /* MIB related fields */
+       struct bcm_sysport_mib  mib;
+
+       /* Ethtool */
+       u32                     msg_enable;
+};
+#endif /* __BCM_SYSPORT_H */
index 0297a79a38e16312c7fe24bfd7d1992b980a8f85..05c6af6c418fa45690d085885b0cca330b58c210 100644 (file)
@@ -1436,7 +1436,7 @@ static int bgmac_probe(struct bcma_device *core)
                return -ENOMEM;
        net_dev->netdev_ops = &bgmac_netdev_ops;
        net_dev->irq = core->irq;
-       SET_ETHTOOL_OPS(net_dev, &bgmac_ethtool_ops);
+       net_dev->ethtool_ops = &bgmac_ethtool_ops;
        bgmac = netdev_priv(net_dev);
        bgmac->net_dev = net_dev;
        bgmac->core = core;
index b6de05e3149b5604d818d5496cbbc23ab7bf64e8..03224090ecf9a94c790cf6498a0f285c9d24495e 100644 (file)
@@ -3506,8 +3506,6 @@ static const struct ethtool_ops bnx2x_vf_ethtool_ops = {
 
 void bnx2x_set_ethtool_ops(struct bnx2x *bp, struct net_device *netdev)
 {
-       if (IS_PF(bp))
-               SET_ETHTOOL_OPS(netdev, &bnx2x_ethtool_ops);
-       else /* vf */
-               SET_ETHTOOL_OPS(netdev, &bnx2x_vf_ethtool_ops);
+       netdev->ethtool_ops = (IS_PF(bp)) ?
+               &bnx2x_ethtool_ops : &bnx2x_vf_ethtool_ops;
 }
index a78edaccceee92d8f2439ac40f3b3ba887ec0000..3b0d43154e677bfe8fd1f66aac7d3c0a6afbcfd5 100644 (file)
@@ -10051,8 +10051,8 @@ static void bnx2x_prev_unload_close_mac(struct bnx2x *bp,
 #define BCM_5710_UNDI_FW_MF_MAJOR      (0x07)
 #define BCM_5710_UNDI_FW_MF_MINOR      (0x08)
 #define BCM_5710_UNDI_FW_MF_VERS       (0x05)
-#define BNX2X_PREV_UNDI_MF_PORT(p)     (0x1a150c + ((p) << 4))
-#define BNX2X_PREV_UNDI_MF_FUNC(f)     (0x1a184c + ((f) << 4))
+#define BNX2X_PREV_UNDI_MF_PORT(p) (BAR_TSTRORM_INTMEM + 0x150c + ((p) << 4))
+#define BNX2X_PREV_UNDI_MF_FUNC(f) (BAR_TSTRORM_INTMEM + 0x184c + ((f) << 4))
 static bool bnx2x_prev_unload_undi_fw_supports_mf(struct bnx2x *bp)
 {
        u8 major, minor, version;
@@ -10352,6 +10352,7 @@ static int bnx2x_prev_unload_common(struct bnx2x *bp)
        /* Reset should be performed after BRB is emptied */
        if (reset_reg & MISC_REGISTERS_RESET_REG_1_RST_BRB1) {
                u32 timer_count = 1000;
+               bool need_write = true;
 
                /* Close the MAC Rx to prevent BRB from filling up */
                bnx2x_prev_unload_close_mac(bp, &mac_vals);
@@ -10398,7 +10399,10 @@ static int bnx2x_prev_unload_common(struct bnx2x *bp)
                         * cleaning methods - might be redundant but harmless.
                         */
                        if (bnx2x_prev_unload_undi_fw_supports_mf(bp)) {
-                               bnx2x_prev_unload_undi_mf(bp);
+                               if (need_write) {
+                                       bnx2x_prev_unload_undi_mf(bp);
+                                       need_write = false;
+                               }
                        } else if (prev_undi) {
                                /* If UNDI resides in memory,
                                 * manually increment it
@@ -13233,6 +13237,8 @@ static void __bnx2x_remove(struct pci_dev *pdev,
                                iounmap(bp->doorbells);
 
                        bnx2x_release_firmware(bp);
+               } else {
+                       bnx2x_vf_pci_dealloc(bp);
                }
                bnx2x_free_mem_bp(bp);
 
index 5c523b32db70126720dbf0b2914dcbb1a3391a2b..81cc2d9831c2192edcf414fa90dd4aa5962285aa 100644 (file)
@@ -427,7 +427,9 @@ static int bnx2x_vf_mac_vlan_config(struct bnx2x *bp,
        if (filter->add && filter->type == BNX2X_VF_FILTER_VLAN &&
            (atomic_read(&bnx2x_vfq(vf, qid, vlan_count)) >=
             vf_vlan_rules_cnt(vf))) {
-               BNX2X_ERR("No credits for vlan\n");
+               BNX2X_ERR("No credits for vlan [%d >= %d]\n",
+                         atomic_read(&bnx2x_vfq(vf, qid, vlan_count)),
+                         vf_vlan_rules_cnt(vf));
                return -ENOMEM;
        }
 
@@ -610,6 +612,7 @@ int bnx2x_vf_mcast(struct bnx2x *bp, struct bnx2x_virtf *vf,
                }
 
                /* add new mcasts */
+               mcast.mcast_list_len = mc_num;
                rc = bnx2x_config_mcast(bp, &mcast, BNX2X_MCAST_CMD_ADD);
                if (rc)
                        BNX2X_ERR("Faled to add multicasts\n");
@@ -837,6 +840,29 @@ int bnx2x_vf_flr_clnup_epilog(struct bnx2x *bp, u8 abs_vfid)
        return 0;
 }
 
+static void bnx2x_iov_re_set_vlan_filters(struct bnx2x *bp,
+                                         struct bnx2x_virtf *vf,
+                                         int new)
+{
+       int num = vf_vlan_rules_cnt(vf);
+       int diff = new - num;
+       bool rc = true;
+
+       DP(BNX2X_MSG_IOV, "vf[%d] - %d vlan filter credits [previously %d]\n",
+          vf->abs_vfid, new, num);
+
+       if (diff > 0)
+               rc = bp->vlans_pool.get(&bp->vlans_pool, diff);
+       else if (diff < 0)
+               rc = bp->vlans_pool.put(&bp->vlans_pool, -diff);
+
+       if (rc)
+               vf_vlan_rules_cnt(vf) = new;
+       else
+               DP(BNX2X_MSG_IOV, "vf[%d] - Failed to configure vlan filter credits change\n",
+                  vf->abs_vfid);
+}
+
 /* must be called after the number of PF queues and the number of VFs are
  * both known
  */
@@ -854,9 +880,11 @@ bnx2x_iov_static_resc(struct bnx2x *bp, struct bnx2x_virtf *vf)
        resc->num_mac_filters = 1;
 
        /* divvy up vlan rules */
+       bnx2x_iov_re_set_vlan_filters(bp, vf, 0);
        vlan_count = bp->vlans_pool.check(&bp->vlans_pool);
        vlan_count = 1 << ilog2(vlan_count);
-       resc->num_vlan_filters = vlan_count / BNX2X_NR_VIRTFN(bp);
+       bnx2x_iov_re_set_vlan_filters(bp, vf,
+                                     vlan_count / BNX2X_NR_VIRTFN(bp));
 
        /* no real limitation */
        resc->num_mc_filters = 0;
@@ -1478,10 +1506,6 @@ int bnx2x_iov_nic_init(struct bnx2x *bp)
                bnx2x_iov_static_resc(bp, vf);
 
                /* queues are initialized during VF-ACQUIRE */
-
-               /* reserve the vf vlan credit */
-               bp->vlans_pool.get(&bp->vlans_pool, vf_vlan_rules_cnt(vf));
-
                vf->filter_state = 0;
                vf->sp_cl_id = bnx2x_fp(bp, 0, cl_id);
 
@@ -1912,11 +1936,12 @@ int bnx2x_vf_chk_avail_resc(struct bnx2x *bp, struct bnx2x_virtf *vf,
        u8 rxq_cnt = vf_rxq_count(vf) ? : bnx2x_vf_max_queue_cnt(bp, vf);
        u8 txq_cnt = vf_txq_count(vf) ? : bnx2x_vf_max_queue_cnt(bp, vf);
 
+       /* Save a vlan filter for the Hypervisor */
        return ((req_resc->num_rxqs <= rxq_cnt) &&
                (req_resc->num_txqs <= txq_cnt) &&
                (req_resc->num_sbs <= vf_sb_count(vf))   &&
                (req_resc->num_mac_filters <= vf_mac_rules_cnt(vf)) &&
-               (req_resc->num_vlan_filters <= vf_vlan_rules_cnt(vf)));
+               (req_resc->num_vlan_filters <= vf_vlan_rules_visible_cnt(vf)));
 }
 
 /* CORE VF API */
@@ -1972,14 +1997,14 @@ int bnx2x_vf_acquire(struct bnx2x *bp, struct bnx2x_virtf *vf,
        vf_txq_count(vf) = resc->num_txqs ? : bnx2x_vf_max_queue_cnt(bp, vf);
        if (resc->num_mac_filters)
                vf_mac_rules_cnt(vf) = resc->num_mac_filters;
-       if (resc->num_vlan_filters)
-               vf_vlan_rules_cnt(vf) = resc->num_vlan_filters;
+       /* Add an additional vlan filter credit for the hypervisor */
+       bnx2x_iov_re_set_vlan_filters(bp, vf, resc->num_vlan_filters + 1);
 
        DP(BNX2X_MSG_IOV,
           "Fulfilling vf request: sb count %d, tx_count %d, rx_count %d, mac_rules_count %d, vlan_rules_count %d\n",
           vf_sb_count(vf), vf_rxq_count(vf),
           vf_txq_count(vf), vf_mac_rules_cnt(vf),
-          vf_vlan_rules_cnt(vf));
+          vf_vlan_rules_visible_cnt(vf));
 
        /* Initialize the queues */
        if (!vf->vfqs) {
@@ -2896,6 +2921,14 @@ void __iomem *bnx2x_vf_doorbells(struct bnx2x *bp)
        return bp->regview + PXP_VF_ADDR_DB_START;
 }
 
+void bnx2x_vf_pci_dealloc(struct bnx2x *bp)
+{
+       BNX2X_PCI_FREE(bp->vf2pf_mbox, bp->vf2pf_mbox_mapping,
+                      sizeof(struct bnx2x_vf_mbx_msg));
+       BNX2X_PCI_FREE(bp->vf2pf_mbox, bp->pf2vf_bulletin_mapping,
+                      sizeof(union pf_vf_bulletin));
+}
+
 int bnx2x_vf_pci_alloc(struct bnx2x *bp)
 {
        mutex_init(&bp->vf2pf_mutex);
@@ -2915,10 +2948,7 @@ int bnx2x_vf_pci_alloc(struct bnx2x *bp)
        return 0;
 
 alloc_mem_err:
-       BNX2X_PCI_FREE(bp->vf2pf_mbox, bp->vf2pf_mbox_mapping,
-                      sizeof(struct bnx2x_vf_mbx_msg));
-       BNX2X_PCI_FREE(bp->vf2pf_mbox, bp->pf2vf_bulletin_mapping,
-                      sizeof(union pf_vf_bulletin));
+       bnx2x_vf_pci_dealloc(bp);
        return -ENOMEM;
 }
 
index 8bf764570eef773eafa87ffd0fca26592e4d64ef..cd4d624d8485b49d528cd480f9b0de1c46dfbd4a 100644 (file)
@@ -159,6 +159,8 @@ struct bnx2x_virtf {
 #define vf_mac_rules_cnt(vf)           ((vf)->alloc_resc.num_mac_filters)
 #define vf_vlan_rules_cnt(vf)          ((vf)->alloc_resc.num_vlan_filters)
 #define vf_mc_rules_cnt(vf)            ((vf)->alloc_resc.num_mc_filters)
+       /* Hide a single vlan filter credit for the hypervisor */
+#define vf_vlan_rules_visible_cnt(vf)  (vf_vlan_rules_cnt(vf) - 1)
 
        u8 sb_count;    /* actual number of SBs */
        u8 igu_base_id; /* base igu status block id */
@@ -502,6 +504,7 @@ static inline int bnx2x_vf_ustorm_prods_offset(struct bnx2x *bp,
 enum sample_bulletin_result bnx2x_sample_bulletin(struct bnx2x *bp);
 void bnx2x_timer_sriov(struct bnx2x *bp);
 void __iomem *bnx2x_vf_doorbells(struct bnx2x *bp);
+void bnx2x_vf_pci_dealloc(struct bnx2x *bp);
 int bnx2x_vf_pci_alloc(struct bnx2x *bp);
 int bnx2x_enable_sriov(struct bnx2x *bp);
 void bnx2x_disable_sriov(struct bnx2x *bp);
@@ -568,6 +571,7 @@ static inline void __iomem *bnx2x_vf_doorbells(struct bnx2x *bp)
        return NULL;
 }
 
+static inline void bnx2x_vf_pci_dealloc(struct bnx2x *bp) {}
 static inline int bnx2x_vf_pci_alloc(struct bnx2x *bp) {return 0; }
 static inline void bnx2x_pf_set_vfs_vlan(struct bnx2x *bp) {}
 static inline int bnx2x_sriov_configure(struct pci_dev *dev, int num_vfs) {return 0; }
index 0622884596b2f478ec4a2789c17fdd0938544995..0c067e8564dd4e15e43a7e22f3e064091234b99a 100644 (file)
@@ -1163,7 +1163,7 @@ static void bnx2x_vf_mbx_acquire_resp(struct bnx2x *bp, struct bnx2x_virtf *vf,
                        bnx2x_vf_max_queue_cnt(bp, vf);
                resc->num_sbs = vf_sb_count(vf);
                resc->num_mac_filters = vf_mac_rules_cnt(vf);
-               resc->num_vlan_filters = vf_vlan_rules_cnt(vf);
+               resc->num_vlan_filters = vf_vlan_rules_visible_cnt(vf);
                resc->num_mc_filters = 0;
 
                if (status == PFVF_STATUS_SUCCESS) {
index 0966bd04375f1aa0384d4196b368bdf9345dc1f9..5ba1cfbd60da3555878fa8fd467c3a9a36c03642 100644 (file)
@@ -2481,7 +2481,7 @@ static int bcmgenet_probe(struct platform_device *pdev)
        dev_set_drvdata(&pdev->dev, dev);
        ether_addr_copy(dev->dev_addr, macaddr);
        dev->watchdog_timeo = 2 * HZ;
-       SET_ETHTOOL_OPS(dev, &bcmgenet_ethtool_ops);
+       dev->ethtool_ops = &bcmgenet_ethtool_ops;
        dev->netdev_ops = &bcmgenet_netdev_ops;
        netif_napi_add(dev, &priv->napi, bcmgenet_poll, 64);
 
index 4608673beaff9f7682d669e4cb7a540fed0c0f04..add8d8596084054ca1e059a360be4a1d24501122 100644 (file)
@@ -298,6 +298,7 @@ int bcmgenet_mii_config(struct net_device *dev)
 static int bcmgenet_mii_probe(struct net_device *dev)
 {
        struct bcmgenet_priv *priv = netdev_priv(dev);
+       struct device_node *dn = priv->pdev->dev.of_node;
        struct phy_device *phydev;
        unsigned int phy_flags;
        int ret;
@@ -307,15 +308,19 @@ static int bcmgenet_mii_probe(struct net_device *dev)
                return 0;
        }
 
-       if (priv->phy_dn)
-               phydev = of_phy_connect(dev, priv->phy_dn,
-                                       bcmgenet_mii_setup, 0,
-                                       priv->phy_interface);
-       else
-               phydev = of_phy_connect_fixed_link(dev,
-                                       bcmgenet_mii_setup,
-                                       priv->phy_interface);
+       /* In the case of a fixed PHY, the DT node associated
+        * to the PHY is the Ethernet MAC DT node.
+        */
+       if (of_phy_is_fixed_link(dn)) {
+               ret = of_phy_register_fixed_link(dn);
+               if (ret)
+                       return ret;
+
+               priv->phy_dn = dn;
+       }
 
+       phydev = of_phy_connect(dev, priv->phy_dn, bcmgenet_mii_setup, 0,
+                               priv->phy_interface);
        if (!phydev) {
                pr_err("could not attach to PHY\n");
                return -ENODEV;
index e5d95c5ce1ad8df29075dcc243650a0e15aa896d..ccd90156aebc848ffe0810d836281e202f028cee 100644 (file)
@@ -4,7 +4,7 @@
  * Copyright (C) 2001, 2002, 2003, 2004 David S. Miller (davem@redhat.com)
  * Copyright (C) 2001, 2002, 2003 Jeff Garzik (jgarzik@pobox.com)
  * Copyright (C) 2004 Sun Microsystems Inc.
- * Copyright (C) 2005-2013 Broadcom Corporation.
+ * Copyright (C) 2005-2014 Broadcom Corporation.
  *
  * Firmware is:
  *     Derived from proprietary unpublished source code,
@@ -94,10 +94,10 @@ static inline void _tg3_flag_clear(enum TG3_FLAGS flag, unsigned long *bits)
 
 #define DRV_MODULE_NAME                "tg3"
 #define TG3_MAJ_NUM                    3
-#define TG3_MIN_NUM                    136
+#define TG3_MIN_NUM                    137
 #define DRV_MODULE_VERSION     \
        __stringify(TG3_MAJ_NUM) "." __stringify(TG3_MIN_NUM)
-#define DRV_MODULE_RELDATE     "Jan 03, 2014"
+#define DRV_MODULE_RELDATE     "May 11, 2014"
 
 #define RESET_KIND_SHUTDOWN    0
 #define RESET_KIND_INIT                1
@@ -7871,9 +7871,7 @@ tg3_tso_bug_end:
        return NETDEV_TX_OK;
 }
 
-/* hard_start_xmit for devices that have the 4G bug and/or 40-bit bug and
- * support TG3_FLAG_HW_TSO_1 or firmware TSO only.
- */
+/* hard_start_xmit for all devices */
 static netdev_tx_t tg3_start_xmit(struct sk_buff *skb, struct net_device *dev)
 {
        struct tg3 *tp = netdev_priv(dev);
@@ -7884,6 +7882,10 @@ static netdev_tx_t tg3_start_xmit(struct sk_buff *skb, struct net_device *dev)
        struct tg3_napi *tnapi;
        struct netdev_queue *txq;
        unsigned int last;
+       struct iphdr *iph = NULL;
+       struct tcphdr *tcph = NULL;
+       __sum16 tcp_csum = 0, ip_csum = 0;
+       __be16 ip_tot_len = 0;
 
        txq = netdev_get_tx_queue(dev, skb_get_queue_mapping(skb));
        tnapi = &tp->napi[skb_get_queue_mapping(skb)];
@@ -7915,7 +7917,6 @@ static netdev_tx_t tg3_start_xmit(struct sk_buff *skb, struct net_device *dev)
 
        mss = skb_shinfo(skb)->gso_size;
        if (mss) {
-               struct iphdr *iph;
                u32 tcp_opt_len, hdr_len;
 
                if (skb_cow_head(skb, 0))
@@ -7927,27 +7928,31 @@ static netdev_tx_t tg3_start_xmit(struct sk_buff *skb, struct net_device *dev)
                hdr_len = skb_transport_offset(skb) + tcp_hdrlen(skb) - ETH_HLEN;
 
                if (!skb_is_gso_v6(skb)) {
+                       if (unlikely((ETH_HLEN + hdr_len) > 80) &&
+                           tg3_flag(tp, TSO_BUG))
+                               return tg3_tso_bug(tp, skb);
+
+                       ip_csum = iph->check;
+                       ip_tot_len = iph->tot_len;
                        iph->check = 0;
                        iph->tot_len = htons(mss + hdr_len);
                }
 
-               if (unlikely((ETH_HLEN + hdr_len) > 80) &&
-                   tg3_flag(tp, TSO_BUG))
-                       return tg3_tso_bug(tp, skb);
-
                base_flags |= (TXD_FLAG_CPU_PRE_DMA |
                               TXD_FLAG_CPU_POST_DMA);
 
+               tcph = tcp_hdr(skb);
+               tcp_csum = tcph->check;
+
                if (tg3_flag(tp, HW_TSO_1) ||
                    tg3_flag(tp, HW_TSO_2) ||
                    tg3_flag(tp, HW_TSO_3)) {
-                       tcp_hdr(skb)->check = 0;
+                       tcph->check = 0;
                        base_flags &= ~TXD_FLAG_TCPUDP_CSUM;
-               } else
-                       tcp_hdr(skb)->check = ~csum_tcpudp_magic(iph->saddr,
-                                                                iph->daddr, 0,
-                                                                IPPROTO_TCP,
-                                                                0);
+               } else {
+                       tcph->check = ~csum_tcpudp_magic(iph->saddr, iph->daddr,
+                                                        0, IPPROTO_TCP, 0);
+               }
 
                if (tg3_flag(tp, HW_TSO_3)) {
                        mss |= (hdr_len & 0xc) << 12;
@@ -8047,6 +8052,18 @@ static netdev_tx_t tg3_start_xmit(struct sk_buff *skb, struct net_device *dev)
        if (would_hit_hwbug) {
                tg3_tx_skb_unmap(tnapi, tnapi->tx_prod, i);
 
+               if (mss) {
+                       /* If it's a TSO packet, do GSO instead of
+                        * allocating and copying to a large linear SKB
+                        */
+                       if (ip_tot_len) {
+                               iph->check = ip_csum;
+                               iph->tot_len = ip_tot_len;
+                       }
+                       tcph->check = tcp_csum;
+                       return tg3_tso_bug(tp, skb);
+               }
+
                /* If the workaround fails due to memory/mapping
                 * failure, silently drop this packet.
                 */
index 04321e5a356e45a0f7fc642f3817035d983dbd90..461accaf0aa40242c3756880dd6659371cdfe5f0 100644 (file)
@@ -4,7 +4,7 @@
  * Copyright (C) 2001, 2002, 2003, 2004 David S. Miller (davem@redhat.com)
  * Copyright (C) 2001 Jeff Garzik (jgarzik@pobox.com)
  * Copyright (C) 2004 Sun Microsystems Inc.
- * Copyright (C) 2007-2013 Broadcom Corporation.
+ * Copyright (C) 2007-2014 Broadcom Corporation.
  */
 
 #ifndef _T3_H
index f9e150825bb58bf0ef9e3568a1084cb8ebc61d37..adca62b728371d9b9e1e70e68c8870c02398eb00 100644 (file)
@@ -1137,5 +1137,5 @@ static const struct ethtool_ops bnad_ethtool_ops = {
 void
 bnad_set_ethtool_ops(struct net_device *netdev)
 {
-       SET_ETHTOOL_OPS(netdev, &bnad_ethtool_ops);
+       netdev->ethtool_ops = &bnad_ethtool_ops;
 }
index 7e49c43b7af3501f953cac4796e4769da542c29d..9e089d24466e65fb6c8b01c08f2ece882e23aaaa 100644 (file)
@@ -4,7 +4,7 @@
 
 config NET_CADENCE
        bool "Cadence devices"
-       depends on HAS_IOMEM && (ARM || AVR32 || COMPILE_TEST)
+       depends on HAS_IOMEM && (ARM || AVR32 || MICROBLAZE || COMPILE_TEST)
        default y
        ---help---
          If you have a network (Ethernet) card belonging to this class, say Y.
@@ -30,7 +30,7 @@ config ARM_AT91_ETHER
 
 config MACB
        tristate "Cadence MACB/GEM support"
-       depends on HAS_DMA && (PLATFORM_AT32AP || ARCH_AT91 || ARCH_PICOXCELL || ARCH_ZYNQ || COMPILE_TEST)
+       depends on HAS_DMA && (PLATFORM_AT32AP || ARCH_AT91 || ARCH_PICOXCELL || ARCH_ZYNQ || MICROBLAZE || COMPILE_TEST)
        select PHYLIB
        ---help---
          The Cadence MACB ethernet interface is found on many Atmel AT32 and
index ca97005e24b41217849beaf4b9a578fbaf1f2027..e9daa072ebb4f2c03af453255a9c374bde6ff47c 100644 (file)
@@ -599,25 +599,16 @@ static void gem_rx_refill(struct macb *bp)
 {
        unsigned int            entry;
        struct sk_buff          *skb;
-       struct macb_dma_desc    *desc;
        dma_addr_t              paddr;
 
        while (CIRC_SPACE(bp->rx_prepared_head, bp->rx_tail, RX_RING_SIZE) > 0) {
-               u32 addr, ctrl;
-
                entry = macb_rx_ring_wrap(bp->rx_prepared_head);
-               desc = &bp->rx_ring[entry];
 
                /* Make hw descriptor updates visible to CPU */
                rmb();
 
-               addr = desc->addr;
-               ctrl = desc->ctrl;
                bp->rx_prepared_head++;
 
-               if ((addr & MACB_BIT(RX_USED)))
-                       continue;
-
                if (bp->rx_skbuff[entry] == NULL) {
                        /* allocate sk_buff for this free entry in ring */
                        skb = netdev_alloc_skb(bp->dev, bp->rx_buffer_size);
@@ -698,7 +689,6 @@ static int gem_rx(struct macb *bp, int budget)
                if (!(addr & MACB_BIT(RX_USED)))
                        break;
 
-               desc->addr &= ~MACB_BIT(RX_USED);
                bp->rx_tail++;
                count++;
 
@@ -891,16 +881,15 @@ static int macb_poll(struct napi_struct *napi, int budget)
        if (work_done < budget) {
                napi_complete(napi);
 
-               /*
-                * We've done what we can to clean the buffers. Make sure we
-                * get notified when new packets arrive.
-                */
-               macb_writel(bp, IER, MACB_RX_INT_FLAGS);
-
                /* Packets received while interrupts were disabled */
                status = macb_readl(bp, RSR);
-               if (unlikely(status))
+               if (status) {
+                       if (bp->caps & MACB_CAPS_ISR_CLEAR_ON_WRITE)
+                               macb_writel(bp, ISR, MACB_BIT(RCOMP));
                        napi_reschedule(napi);
+               } else {
+                       macb_writel(bp, IER, MACB_RX_INT_FLAGS);
+               }
        }
 
        /* TODO: Handle errors */
@@ -951,6 +940,10 @@ static irqreturn_t macb_interrupt(int irq, void *dev_id)
                if (unlikely(status & (MACB_TX_ERR_FLAGS))) {
                        macb_writel(bp, IDR, MACB_TX_INT_FLAGS);
                        schedule_work(&bp->tx_error_task);
+
+                       if (bp->caps & MACB_CAPS_ISR_CLEAR_ON_WRITE)
+                               macb_writel(bp, ISR, MACB_TX_ERR_FLAGS);
+
                        break;
                }
 
@@ -968,6 +961,9 @@ static irqreturn_t macb_interrupt(int irq, void *dev_id)
                                bp->hw_stats.gem.rx_overruns++;
                        else
                                bp->hw_stats.macb.rx_overruns++;
+
+                       if (bp->caps & MACB_CAPS_ISR_CLEAR_ON_WRITE)
+                               macb_writel(bp, ISR, MACB_BIT(ISR_ROVR));
                }
 
                if (status & MACB_BIT(HRESP)) {
@@ -977,6 +973,9 @@ static irqreturn_t macb_interrupt(int irq, void *dev_id)
                         * (work queue?)
                         */
                        netdev_err(dev, "DMA bus error: HRESP not OK\n");
+
+                       if (bp->caps & MACB_CAPS_ISR_CLEAR_ON_WRITE)
+                               macb_writel(bp, ISR, MACB_BIT(HRESP));
                }
 
                status = macb_readl(bp, ISR);
@@ -1113,7 +1112,7 @@ static void gem_free_rx_buffers(struct macb *bp)
 
                desc = &bp->rx_ring[i];
                addr = MACB_BF(RX_WADDR, MACB_BFEXT(RX_WADDR, desc->addr));
-               dma_unmap_single(&bp->pdev->dev, addr, skb->len,
+               dma_unmap_single(&bp->pdev->dev, addr, bp->rx_buffer_size,
                                 DMA_FROM_DEVICE);
                dev_kfree_skb_any(skb);
                skb = NULL;
index 521dfea44b837d57bc7a7297ac973cd4d8098d3e..25d6b2a10e4e6f7fb6b09d25706e11e2783bbea4 100644 (file)
@@ -1737,7 +1737,7 @@ static int xgmac_probe(struct platform_device *pdev)
        platform_set_drvdata(pdev, ndev);
        ether_setup(ndev);
        ndev->netdev_ops = &xgmac_netdev_ops;
-       SET_ETHTOOL_OPS(ndev, &xgmac_ethtool_ops);
+       ndev->ethtool_ops = &xgmac_ethtool_ops;
        spin_lock_init(&priv->stats_lock);
        INIT_WORK(&priv->tx_timeout_work, xgmac_tx_timeout_work);
 
index d40c994a4f6a2c807965e44eaf81f2be0053fb50..570222c3341070445b3b0c224205872f286a3cfd 100644 (file)
@@ -67,13 +67,13 @@ config CHELSIO_T3
          will be called cxgb3.
 
 config CHELSIO_T4
-       tristate "Chelsio Communications T4 Ethernet support"
+       tristate "Chelsio Communications T4/T5 Ethernet support"
        depends on PCI
        select FW_LOADER
        select MDIO
        ---help---
-         This driver supports Chelsio T4-based gigabit and 10Gb Ethernet
-         adapters.
+         This driver supports Chelsio T4 and T5 based gigabit, 10Gb Ethernet
+         adapter and T5 based 40Gb Ethernet adapter.
 
          For general information about Chelsio and our products, visit
          our website at <http://www.chelsio.com>.
@@ -87,11 +87,12 @@ config CHELSIO_T4
          will be called cxgb4.
 
 config CHELSIO_T4VF
-       tristate "Chelsio Communications T4 Virtual Function Ethernet support"
+       tristate "Chelsio Communications T4/T5 Virtual Function Ethernet support"
        depends on PCI
        ---help---
-         This driver supports Chelsio T4-based gigabit and 10Gb Ethernet
-         adapters with PCI-E SR-IOV Virtual Functions.
+         This driver supports Chelsio T4 and T5 based gigabit, 10Gb Ethernet
+         adapters and T5 based 40Gb Ethernet adapters with PCI-E SR-IOV Virtual
+         Functions.
 
          For general information about Chelsio and our products, visit
          our website at <http://www.chelsio.com>.
index 0fe7ff750d77e1618a9fd49a59bbf9577ed9190d..c1b2c1dbf015accf381c1f010610dcac625f618e 100644 (file)
@@ -1100,7 +1100,7 @@ static int init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
 
                netif_napi_add(netdev, &adapter->napi, t1_poll, 64);
 
-               SET_ETHTOOL_OPS(netdev, &t1_ethtool_ops);
+               netdev->ethtool_ops = &t1_ethtool_ops;
        }
 
        if (t1_init_sw_modules(adapter, bi) < 0) {
index 07bbb711b7e5a716aba3e8d2e3e958e2ce8fa506..3ed50794724892979a969e7eefa33e6de2f074aa 100644 (file)
@@ -3291,7 +3291,7 @@ static int init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
                        netdev->features |= NETIF_F_HIGHDMA;
 
                netdev->netdev_ops = &cxgb_netdev_ops;
-               SET_ETHTOOL_OPS(netdev, &cxgb_ethtool_ops);
+               netdev->ethtool_ops = &cxgb_ethtool_ops;
        }
 
        pci_set_drvdata(pdev, adapter);
index c0a9dd55f4e55215bb0e42902c12c97b241d0d3f..b0cbb2b7fd484f95ec36574feef7981753a552e9 100644 (file)
@@ -185,7 +185,7 @@ static struct net_device *get_iff_from_mac(struct adapter *adapter,
                if (ether_addr_equal(dev->dev_addr, mac)) {
                        rcu_read_lock();
                        if (vlan && vlan != VLAN_VID_MASK) {
-                               dev = __vlan_find_dev_deep(dev, htons(ETH_P_8021Q), vlan);
+                               dev = __vlan_find_dev_deep_rcu(dev, htons(ETH_P_8021Q), vlan);
                        } else if (netif_is_bond_slave(dev)) {
                                struct net_device *upper_dev;
 
index 6fe58913403ab24f61ddf7f7e8d02714e040d250..266a5bc6aedff349f6089aeb839bff668531c6d2 100644 (file)
@@ -2252,12 +2252,19 @@ static int get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
        else if (p->port_type == FW_PORT_TYPE_FIBER_XFI ||
                 p->port_type == FW_PORT_TYPE_FIBER_XAUI)
                cmd->port = PORT_FIBRE;
-       else if (p->port_type == FW_PORT_TYPE_SFP) {
-               if (p->mod_type == FW_PORT_MOD_TYPE_TWINAX_PASSIVE ||
-                   p->mod_type == FW_PORT_MOD_TYPE_TWINAX_ACTIVE)
+       else if (p->port_type == FW_PORT_TYPE_SFP ||
+                p->port_type == FW_PORT_TYPE_QSFP_10G ||
+                p->port_type == FW_PORT_TYPE_QSFP) {
+               if (p->mod_type == FW_PORT_MOD_TYPE_LR ||
+                   p->mod_type == FW_PORT_MOD_TYPE_SR ||
+                   p->mod_type == FW_PORT_MOD_TYPE_ER ||
+                   p->mod_type == FW_PORT_MOD_TYPE_LRM)
+                       cmd->port = PORT_FIBRE;
+               else if (p->mod_type == FW_PORT_MOD_TYPE_TWINAX_PASSIVE ||
+                        p->mod_type == FW_PORT_MOD_TYPE_TWINAX_ACTIVE)
                        cmd->port = PORT_DA;
                else
-                       cmd->port = PORT_FIBRE;
+                       cmd->port = PORT_OTHER;
        } else
                cmd->port = PORT_OTHER;
 
@@ -4061,7 +4068,7 @@ static int update_root_dev_clip(struct net_device *dev)
 
        /* Parse all bond and vlan devices layered on top of the physical dev */
        for (i = 0; i < VLAN_N_VID; i++) {
-               root_dev = __vlan_find_dev_deep(dev, htons(ETH_P_8021Q), i);
+               root_dev = __vlan_find_dev_deep_rcu(dev, htons(ETH_P_8021Q), i);
                if (!root_dev)
                        continue;
 
@@ -5870,6 +5877,8 @@ static void print_port_info(const struct net_device *dev)
                spd = " 2.5 GT/s";
        else if (adap->params.pci.speed == PCI_EXP_LNKSTA_CLS_5_0GB)
                spd = " 5 GT/s";
+       else if (adap->params.pci.speed == PCI_EXP_LNKSTA_CLS_8_0GB)
+               spd = " 8 GT/s";
 
        if (pi->link_cfg.supported & FW_PORT_CAP_SPEED_100M)
                bufp += sprintf(bufp, "100/");
@@ -6074,7 +6083,7 @@ static int init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
                netdev->priv_flags |= IFF_UNICAST_FLT;
 
                netdev->netdev_ops = &cxgb4_netdev_ops;
-               SET_ETHTOOL_OPS(netdev, &cxgb_ethtool_ops);
+               netdev->ethtool_ops = &cxgb_ethtool_ops;
        }
 
        pci_set_drvdata(pdev, adapter);
index ca95cf2954eb33f62719130a8b0432fbb324c2b6..cced1a3d5181337ea5560918370d5b3ebcaa5237 100644 (file)
@@ -1697,7 +1697,8 @@ int t4_ethrx_handler(struct sge_rspq *q, const __be64 *rsp,
                return handle_trace_pkt(q->adap, si);
 
        pkt = (const struct cpl_rx_pkt *)rsp;
-       csum_ok = pkt->csum_calc && !pkt->err_vec;
+       csum_ok = pkt->csum_calc && !pkt->err_vec &&
+                 (q->netdev->features & NETIF_F_RXCSUM);
        if ((pkt->l2info & htonl(RXF_TCP)) &&
            (q->netdev->features & NETIF_F_GRO) && csum_ok && !pkt->ip_frag) {
                do_gro(rxq, si, pkt);
@@ -1720,8 +1721,7 @@ int t4_ethrx_handler(struct sge_rspq *q, const __be64 *rsp,
 
        rxq->stats.pkts++;
 
-       if (csum_ok && (q->netdev->features & NETIF_F_RXCSUM) &&
-           (pkt->l2info & htonl(RXF_UDP | RXF_TCP))) {
+       if (csum_ok && (pkt->l2info & htonl(RXF_UDP | RXF_TCP))) {
                if (!pkt->ip_frag) {
                        skb->ip_summed = CHECKSUM_UNNECESSARY;
                        rxq->stats.rx_cso++;
index 52859288de7b4d5b8c550c9a1e53fe5845225da0..ff1cdd1788b5f62efdf03ffd2f501b65054383a9 100644 (file)
@@ -2664,7 +2664,7 @@ static int cxgb4vf_pci_probe(struct pci_dev *pdev,
                netdev->priv_flags |= IFF_UNICAST_FLT;
 
                netdev->netdev_ops = &cxgb4vf_netdev_ops;
-               SET_ETHTOOL_OPS(netdev, &cxgb4vf_ethtool_ops);
+               netdev->ethtool_ops = &cxgb4vf_ethtool_ops;
 
                /*
                 * Initialize the hardware/software state for the port.
index 9cfa4b4bb089d398a1b687a71d32f0856d15fa47..adebbf849cdbfab8c3f639a97ae80077aec84493 100644 (file)
@@ -1510,7 +1510,8 @@ int t4vf_ethrx_handler(struct sge_rspq *rspq, const __be64 *rsp,
 {
        struct sk_buff *skb;
        const struct cpl_rx_pkt *pkt = (void *)rsp;
-       bool csum_ok = pkt->csum_calc && !pkt->err_vec;
+       bool csum_ok = pkt->csum_calc && !pkt->err_vec &&
+                      (rspq->netdev->features & NETIF_F_RXCSUM);
        struct sge_eth_rxq *rxq = container_of(rspq, struct sge_eth_rxq, rspq);
 
        /*
@@ -1538,8 +1539,8 @@ int t4vf_ethrx_handler(struct sge_rspq *rspq, const __be64 *rsp,
        skb_record_rx_queue(skb, rspq->idx);
        rxq->stats.pkts++;
 
-       if (csum_ok && (rspq->netdev->features & NETIF_F_RXCSUM) &&
-           !pkt->err_vec && (be32_to_cpu(pkt->l2info) & (RXF_UDP|RXF_TCP))) {
+       if (csum_ok && !pkt->err_vec &&
+           (be32_to_cpu(pkt->l2info) & (RXF_UDP|RXF_TCP))) {
                if (!pkt->ip_frag)
                        skb->ip_summed = CHECKSUM_UNNECESSARY;
                else {
index e35c8e0202adda08dec6d53586bdd9f7da89a2e9..f23ef321606ca1326b02507139f953274964b84d 100644 (file)
@@ -43,6 +43,8 @@
 #define ENIC_CQ_MAX            (ENIC_WQ_MAX + ENIC_RQ_MAX)
 #define ENIC_INTR_MAX          (ENIC_CQ_MAX + 2)
 
+#define ENIC_AIC_LARGE_PKT_DIFF        3
+
 struct enic_msix_entry {
        int requested;
        char devname[IFNAMSIZ];
@@ -50,6 +52,33 @@ struct enic_msix_entry {
        void *devid;
 };
 
+/* Store only the lower range.  Higher range is given by fw. */
+struct enic_intr_mod_range {
+       u32 small_pkt_range_start;
+       u32 large_pkt_range_start;
+};
+
+struct enic_intr_mod_table {
+       u32 rx_rate;
+       u32 range_percent;
+};
+
+#define ENIC_MAX_LINK_SPEEDS           3
+#define ENIC_LINK_SPEED_10G            10000
+#define ENIC_LINK_SPEED_4G             4000
+#define ENIC_LINK_40G_INDEX            2
+#define ENIC_LINK_10G_INDEX            1
+#define ENIC_LINK_4G_INDEX             0
+#define ENIC_RX_COALESCE_RANGE_END     125
+#define ENIC_AIC_TS_BREAK              100
+
+struct enic_rx_coal {
+       u32 small_pkt_range_start;
+       u32 large_pkt_range_start;
+       u32 range_end;
+       u32 use_adaptive_rx_coalesce;
+};
+
 /* priv_flags */
 #define ENIC_SRIOV_ENABLED             (1 << 0)
 
@@ -92,6 +121,7 @@ struct enic {
        unsigned int mc_count;
        unsigned int uc_count;
        u32 port_mtu;
+       struct enic_rx_coal rx_coalesce_setting;
        u32 rx_coalesce_usecs;
        u32 tx_coalesce_usecs;
 #ifdef CONFIG_PCI_IOV
index 47e3562f48667232ad16be0e57cda9c618fac430..1882db230e139e506bfb1efc5248ac211ec76c2b 100644 (file)
@@ -79,6 +79,17 @@ static const struct enic_stat enic_rx_stats[] = {
 static const unsigned int enic_n_tx_stats = ARRAY_SIZE(enic_tx_stats);
 static const unsigned int enic_n_rx_stats = ARRAY_SIZE(enic_rx_stats);
 
+void enic_intr_coal_set_rx(struct enic *enic, u32 timer)
+{
+       int i;
+       int intr;
+
+       for (i = 0; i < enic->rq_count; i++) {
+               intr = enic_msix_rq_intr(enic, i);
+               vnic_intr_coalescing_timer_set(&enic->intr[intr], timer);
+       }
+}
+
 static int enic_get_settings(struct net_device *netdev,
        struct ethtool_cmd *ecmd)
 {
@@ -178,9 +189,14 @@ static int enic_get_coalesce(struct net_device *netdev,
        struct ethtool_coalesce *ecmd)
 {
        struct enic *enic = netdev_priv(netdev);
+       struct enic_rx_coal *rxcoal = &enic->rx_coalesce_setting;
 
        ecmd->tx_coalesce_usecs = enic->tx_coalesce_usecs;
        ecmd->rx_coalesce_usecs = enic->rx_coalesce_usecs;
+       if (rxcoal->use_adaptive_rx_coalesce)
+               ecmd->use_adaptive_rx_coalesce = 1;
+       ecmd->rx_coalesce_usecs_low = rxcoal->small_pkt_range_start;
+       ecmd->rx_coalesce_usecs_high = rxcoal->range_end;
 
        return 0;
 }
@@ -191,17 +207,31 @@ static int enic_set_coalesce(struct net_device *netdev,
        struct enic *enic = netdev_priv(netdev);
        u32 tx_coalesce_usecs;
        u32 rx_coalesce_usecs;
+       u32 rx_coalesce_usecs_low;
+       u32 rx_coalesce_usecs_high;
+       u32 coalesce_usecs_max;
        unsigned int i, intr;
+       struct enic_rx_coal *rxcoal = &enic->rx_coalesce_setting;
 
+       coalesce_usecs_max = vnic_dev_get_intr_coal_timer_max(enic->vdev);
        tx_coalesce_usecs = min_t(u32, ecmd->tx_coalesce_usecs,
-               vnic_dev_get_intr_coal_timer_max(enic->vdev));
+                                 coalesce_usecs_max);
        rx_coalesce_usecs = min_t(u32, ecmd->rx_coalesce_usecs,
-               vnic_dev_get_intr_coal_timer_max(enic->vdev));
+                                 coalesce_usecs_max);
+
+       rx_coalesce_usecs_low = min_t(u32, ecmd->rx_coalesce_usecs_low,
+                                     coalesce_usecs_max);
+       rx_coalesce_usecs_high = min_t(u32, ecmd->rx_coalesce_usecs_high,
+                                      coalesce_usecs_max);
 
        switch (vnic_dev_get_intr_mode(enic->vdev)) {
        case VNIC_DEV_INTR_MODE_INTX:
                if (tx_coalesce_usecs != rx_coalesce_usecs)
                        return -EINVAL;
+               if (ecmd->use_adaptive_rx_coalesce      ||
+                   ecmd->rx_coalesce_usecs_low         ||
+                   ecmd->rx_coalesce_usecs_high)
+                       return -EOPNOTSUPP;
 
                intr = enic_legacy_io_intr();
                vnic_intr_coalescing_timer_set(&enic->intr[intr],
@@ -210,6 +240,10 @@ static int enic_set_coalesce(struct net_device *netdev,
        case VNIC_DEV_INTR_MODE_MSI:
                if (tx_coalesce_usecs != rx_coalesce_usecs)
                        return -EINVAL;
+               if (ecmd->use_adaptive_rx_coalesce      ||
+                   ecmd->rx_coalesce_usecs_low         ||
+                   ecmd->rx_coalesce_usecs_high)
+                       return -EOPNOTSUPP;
 
                vnic_intr_coalescing_timer_set(&enic->intr[0],
                        tx_coalesce_usecs);
@@ -221,12 +255,27 @@ static int enic_set_coalesce(struct net_device *netdev,
                                tx_coalesce_usecs);
                }
 
-               for (i = 0; i < enic->rq_count; i++) {
-                       intr = enic_msix_rq_intr(enic, i);
-                       vnic_intr_coalescing_timer_set(&enic->intr[intr],
-                               rx_coalesce_usecs);
+               if (rxcoal->use_adaptive_rx_coalesce) {
+                       if (!ecmd->use_adaptive_rx_coalesce) {
+                               rxcoal->use_adaptive_rx_coalesce = 0;
+                               enic_intr_coal_set_rx(enic, rx_coalesce_usecs);
+                       }
+               } else {
+                       if (ecmd->use_adaptive_rx_coalesce)
+                               rxcoal->use_adaptive_rx_coalesce = 1;
+                       else
+                               enic_intr_coal_set_rx(enic, rx_coalesce_usecs);
                }
 
+               if (ecmd->rx_coalesce_usecs_high) {
+                       if (rx_coalesce_usecs_high <
+                           (rx_coalesce_usecs_low + ENIC_AIC_LARGE_PKT_DIFF))
+                               return -EINVAL;
+                       rxcoal->range_end = rx_coalesce_usecs_high;
+                       rxcoal->small_pkt_range_start = rx_coalesce_usecs_low;
+                       rxcoal->large_pkt_range_start = rx_coalesce_usecs_low +
+                                                       ENIC_AIC_LARGE_PKT_DIFF;
+               }
                break;
        default:
                break;
@@ -253,5 +302,5 @@ static const struct ethtool_ops enic_ethtool_ops = {
 
 void enic_set_ethtool_ops(struct net_device *netdev)
 {
-       SET_ETHTOOL_OPS(netdev, &enic_ethtool_ops);
+       netdev->ethtool_ops = &enic_ethtool_ops;
 }
index 2945718ce8068e4355628852ed200d539c9c2273..0d8995cc92ed65902e245eb40d665091c968e8cc 100644 (file)
@@ -38,6 +38,7 @@
 #include <linux/rtnetlink.h>
 #include <linux/prefetch.h>
 #include <net/ip6_checksum.h>
+#include <linux/ktime.h>
 
 #include "cq_enet_desc.h"
 #include "vnic_dev.h"
@@ -72,6 +73,35 @@ MODULE_LICENSE("GPL");
 MODULE_VERSION(DRV_VERSION);
 MODULE_DEVICE_TABLE(pci, enic_id_table);
 
+#define ENIC_LARGE_PKT_THRESHOLD               1000
+#define ENIC_MAX_COALESCE_TIMERS               10
+/*  Interrupt moderation table, which will be used to decide the
+ *  coalescing timer values
+ *  {rx_rate in Mbps, mapping percentage of the range}
+ */
+struct enic_intr_mod_table mod_table[ENIC_MAX_COALESCE_TIMERS + 1] = {
+       {4000,  0},
+       {4400, 10},
+       {5060, 20},
+       {5230, 30},
+       {5540, 40},
+       {5820, 50},
+       {6120, 60},
+       {6435, 70},
+       {6745, 80},
+       {7000, 90},
+       {0xFFFFFFFF, 100}
+};
+
+/* This table helps the driver to pick different ranges for rx coalescing
+ * timer depending on the link speed.
+ */
+struct enic_intr_mod_range mod_range[ENIC_MAX_LINK_SPEEDS] = {
+       {0,  0}, /* 0  - 4  Gbps */
+       {0,  3}, /* 4  - 10 Gbps */
+       {3,  6}, /* 10 - 40 Gbps */
+};
+
 int enic_is_dynamic(struct enic *enic)
 {
        return enic->pdev->device == PCI_DEVICE_ID_CISCO_VIC_ENET_DYN;
@@ -979,6 +1009,15 @@ static int enic_rq_alloc_buf(struct vnic_rq *rq)
        return 0;
 }
 
+static void enic_intr_update_pkt_size(struct vnic_rx_bytes_counter *pkt_size,
+                                     u32 pkt_len)
+{
+       if (ENIC_LARGE_PKT_THRESHOLD <= pkt_len)
+               pkt_size->large_pkt_bytes_cnt += pkt_len;
+       else
+               pkt_size->small_pkt_bytes_cnt += pkt_len;
+}
+
 static void enic_rq_indicate_buf(struct vnic_rq *rq,
        struct cq_desc *cq_desc, struct vnic_rq_buf *buf,
        int skipped, void *opaque)
@@ -986,6 +1025,7 @@ static void enic_rq_indicate_buf(struct vnic_rq *rq,
        struct enic *enic = vnic_dev_priv(rq->vdev);
        struct net_device *netdev = enic->netdev;
        struct sk_buff *skb;
+       struct vnic_cq *cq = &enic->cq[enic_cq_rq(enic, rq->index)];
 
        u8 type, color, eop, sop, ingress_port, vlan_stripped;
        u8 fcoe, fcoe_sof, fcoe_fc_crc_ok, fcoe_enc_error, fcoe_eof;
@@ -1056,6 +1096,9 @@ static void enic_rq_indicate_buf(struct vnic_rq *rq,
                        napi_gro_receive(&enic->napi[q_number], skb);
                else
                        netif_receive_skb(skb);
+               if (enic->rx_coalesce_setting.use_adaptive_rx_coalesce)
+                       enic_intr_update_pkt_size(&cq->pkt_size_counter,
+                                                 bytes_written);
        } else {
 
                /* Buffer overflow
@@ -1134,6 +1177,64 @@ static int enic_poll(struct napi_struct *napi, int budget)
        return rq_work_done;
 }
 
+static void enic_set_int_moderation(struct enic *enic, struct vnic_rq *rq)
+{
+       unsigned int intr = enic_msix_rq_intr(enic, rq->index);
+       struct vnic_cq *cq = &enic->cq[enic_cq_rq(enic, rq->index)];
+       u32 timer = cq->tobe_rx_coal_timeval;
+
+       if (cq->tobe_rx_coal_timeval != cq->cur_rx_coal_timeval) {
+               vnic_intr_coalescing_timer_set(&enic->intr[intr], timer);
+               cq->cur_rx_coal_timeval = cq->tobe_rx_coal_timeval;
+       }
+}
+
+static void enic_calc_int_moderation(struct enic *enic, struct vnic_rq *rq)
+{
+       struct enic_rx_coal *rx_coal = &enic->rx_coalesce_setting;
+       struct vnic_cq *cq = &enic->cq[enic_cq_rq(enic, rq->index)];
+       struct vnic_rx_bytes_counter *pkt_size_counter = &cq->pkt_size_counter;
+       int index;
+       u32 timer;
+       u32 range_start;
+       u32 traffic;
+       u64 delta;
+       ktime_t now = ktime_get();
+
+       delta = ktime_us_delta(now, cq->prev_ts);
+       if (delta < ENIC_AIC_TS_BREAK)
+               return;
+       cq->prev_ts = now;
+
+       traffic = pkt_size_counter->large_pkt_bytes_cnt +
+                 pkt_size_counter->small_pkt_bytes_cnt;
+       /* The table takes Mbps
+        * traffic *= 8    => bits
+        * traffic *= (10^6 / delta)    => bps
+        * traffic /= 10^6     => Mbps
+        *
+        * Combining, traffic *= (8 / delta)
+        */
+
+       traffic <<= 3;
+       traffic /= delta;
+
+       for (index = 0; index < ENIC_MAX_COALESCE_TIMERS; index++)
+               if (traffic < mod_table[index].rx_rate)
+                       break;
+       range_start = (pkt_size_counter->small_pkt_bytes_cnt >
+                      pkt_size_counter->large_pkt_bytes_cnt << 1) ?
+                     rx_coal->small_pkt_range_start :
+                     rx_coal->large_pkt_range_start;
+       timer = range_start + ((rx_coal->range_end - range_start) *
+                              mod_table[index].range_percent / 100);
+       /* Damping */
+       cq->tobe_rx_coal_timeval = (timer + cq->tobe_rx_coal_timeval) >> 1;
+
+       pkt_size_counter->large_pkt_bytes_cnt = 0;
+       pkt_size_counter->small_pkt_bytes_cnt = 0;
+}
+
 static int enic_poll_msix(struct napi_struct *napi, int budget)
 {
        struct net_device *netdev = napi->dev;
@@ -1171,6 +1272,13 @@ static int enic_poll_msix(struct napi_struct *napi, int budget)
 
        if (err)
                work_done = work_to_do;
+       if (enic->rx_coalesce_setting.use_adaptive_rx_coalesce)
+               /* Call the function which refreshes
+                * the intr coalescing timer value based on
+                * the traffic.  This is supported only in
+                * the case of MSI-x mode
+                */
+               enic_calc_int_moderation(enic, &enic->rq[rq]);
 
        if (work_done < work_to_do) {
 
@@ -1179,6 +1287,8 @@ static int enic_poll_msix(struct napi_struct *napi, int budget)
                 */
 
                napi_complete(napi);
+               if (enic->rx_coalesce_setting.use_adaptive_rx_coalesce)
+                       enic_set_int_moderation(enic, &enic->rq[rq]);
                vnic_intr_unmask(&enic->intr[intr]);
        }
 
@@ -1314,6 +1424,42 @@ static void enic_synchronize_irqs(struct enic *enic)
        }
 }
 
+static void enic_set_rx_coal_setting(struct enic *enic)
+{
+       unsigned int speed;
+       int index = -1;
+       struct enic_rx_coal *rx_coal = &enic->rx_coalesce_setting;
+
+       /* If intr mode is not MSIX, do not do adaptive coalescing */
+       if (VNIC_DEV_INTR_MODE_MSIX != vnic_dev_get_intr_mode(enic->vdev)) {
+               netdev_info(enic->netdev, "INTR mode is not MSIX, Not initializing adaptive coalescing");
+               return;
+       }
+
+       /* 1. Read the link speed from fw
+        * 2. Pick the default range for the speed
+        * 3. Update it in enic->rx_coalesce_setting
+        */
+       speed = vnic_dev_port_speed(enic->vdev);
+       if (ENIC_LINK_SPEED_10G < speed)
+               index = ENIC_LINK_40G_INDEX;
+       else if (ENIC_LINK_SPEED_4G < speed)
+               index = ENIC_LINK_10G_INDEX;
+       else
+               index = ENIC_LINK_4G_INDEX;
+
+       rx_coal->small_pkt_range_start = mod_range[index].small_pkt_range_start;
+       rx_coal->large_pkt_range_start = mod_range[index].large_pkt_range_start;
+       rx_coal->range_end = ENIC_RX_COALESCE_RANGE_END;
+
+       /* Start with the value provided by UCSM */
+       for (index = 0; index < enic->rq_count; index++)
+               enic->cq[index].cur_rx_coal_timeval =
+                               enic->config.intr_timer_usec;
+
+       rx_coal->use_adaptive_rx_coalesce = 1;
+}
+
 static int enic_dev_notify_set(struct enic *enic)
 {
        int err;
@@ -2231,6 +2377,7 @@ static int enic_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
        enic->notify_timer.function = enic_notify_timer;
        enic->notify_timer.data = (unsigned long)enic;
 
+       enic_set_rx_coal_setting(enic);
        INIT_WORK(&enic->reset, enic_reset);
        INIT_WORK(&enic->change_mtu_work, enic_change_mtu_work);
 
@@ -2250,6 +2397,9 @@ static int enic_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
        }
 
        enic->tx_coalesce_usecs = enic->config.intr_timer_usec;
+       /* rx coalesce time already got initialized. This gets used
+        * if adaptive coal is turned off
+        */
        enic->rx_coalesce_usecs = enic->tx_coalesce_usecs;
 
        if (enic_is_dynamic(enic) || enic_is_sriov_vf(enic))
index 579315cbe803c91130c978b5e21538240aff3018..4e6aa65857f70a0b7236decf382c3018c137517c 100644 (file)
@@ -50,6 +50,11 @@ struct vnic_cq_ctrl {
        u32 pad10;
 };
 
+struct vnic_rx_bytes_counter {
+       unsigned int small_pkt_bytes_cnt;
+       unsigned int large_pkt_bytes_cnt;
+};
+
 struct vnic_cq {
        unsigned int index;
        struct vnic_dev *vdev;
@@ -58,6 +63,10 @@ struct vnic_cq {
        unsigned int to_clean;
        unsigned int last_color;
        unsigned int interrupt_offset;
+       struct vnic_rx_bytes_counter pkt_size_counter;
+       unsigned int cur_rx_coal_timeval;
+       unsigned int tobe_rx_coal_timeval;
+       ktime_t prev_ts;
 };
 
 static inline unsigned int vnic_cq_service(struct vnic_cq *cq,
index 1642de78aac84c86b51cbae27c16d9748d70fb19..861660841ce281c23a5790e62942a779998b5209 100644 (file)
@@ -1703,7 +1703,7 @@ static int tulip_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
 #ifdef CONFIG_TULIP_NAPI
        netif_napi_add(dev, &tp->napi, tulip_poll, 16);
 #endif
-       SET_ETHTOOL_OPS(dev, &ops);
+       dev->ethtool_ops = &ops;
 
        if (register_netdev(dev))
                goto err_out_free_ring;
index 4fb756d219f700bfb82a37d62e9cb078fca22024..2324f2ddfd4821b991006dffedf213a1dad61fcf 100644 (file)
@@ -227,7 +227,7 @@ rio_probe1 (struct pci_dev *pdev, const struct pci_device_id *ent)
        }
        dev->netdev_ops = &netdev_ops;
        dev->watchdog_timeo = TX_TIMEOUT;
-       SET_ETHTOOL_OPS(dev, &ethtool_ops);
+       dev->ethtool_ops = &ethtool_ops;
 #if 0
        dev->features = NETIF_F_IP_CSUM;
 #endif
index d9e5ca0d48c125c88e55b319975fcab5e0a4ad99..433c1e18544250bd76dddb3e1e243d8b90ce3b73 100644 (file)
@@ -577,7 +577,7 @@ static int sundance_probe1(struct pci_dev *pdev,
 
        /* The chip-specific entries in the device structure. */
        dev->netdev_ops = &netdev_ops;
-       SET_ETHTOOL_OPS(dev, &ethtool_ops);
+       dev->ethtool_ops = &ethtool_ops;
        dev->watchdog_timeo = TX_TIMEOUT;
 
        pci_set_drvdata(pdev, dev);
diff --git a/drivers/net/ethernet/ec_bhf.c b/drivers/net/ethernet/ec_bhf.c
new file mode 100644 (file)
index 0000000..056b44b
--- /dev/null
@@ -0,0 +1,706 @@
+ /*
+ * drivers/net/ethernet/beckhoff/ec_bhf.c
+ *
+ * Copyright (C) 2014 Darek Marcinkiewicz <reksio@newterm.pl>
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ */
+
+/* This is a driver for EtherCAT master module present on CCAT FPGA.
+ * Those can be found on Bechhoff CX50xx industrial PCs.
+ */
+
+#if 0
+#define DEBUG
+#endif
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/moduleparam.h>
+#include <linux/pci.h>
+#include <linux/init.h>
+
+#include <linux/netdevice.h>
+#include <linux/etherdevice.h>
+#include <linux/ip.h>
+#include <linux/skbuff.h>
+#include <linux/hrtimer.h>
+#include <linux/interrupt.h>
+#include <linux/stat.h>
+
+#define TIMER_INTERVAL_NSEC    20000
+
+#define INFO_BLOCK_SIZE                0x10
+#define INFO_BLOCK_TYPE                0x0
+#define INFO_BLOCK_REV         0x2
+#define INFO_BLOCK_BLK_CNT     0x4
+#define INFO_BLOCK_TX_CHAN     0x4
+#define INFO_BLOCK_RX_CHAN     0x5
+#define INFO_BLOCK_OFFSET      0x8
+
+#define EC_MII_OFFSET          0x4
+#define EC_FIFO_OFFSET         0x8
+#define EC_MAC_OFFSET          0xc
+
+#define MAC_FRAME_ERR_CNT      0x0
+#define MAC_RX_ERR_CNT         0x1
+#define MAC_CRC_ERR_CNT                0x2
+#define MAC_LNK_LST_ERR_CNT    0x3
+#define MAC_TX_FRAME_CNT       0x10
+#define MAC_RX_FRAME_CNT       0x14
+#define MAC_TX_FIFO_LVL                0x20
+#define MAC_DROPPED_FRMS       0x28
+#define MAC_CONNECTED_CCAT_FLAG        0x78
+
+#define MII_MAC_ADDR           0x8
+#define MII_MAC_FILT_FLAG      0xe
+#define MII_LINK_STATUS                0xf
+
+#define FIFO_TX_REG            0x0
+#define FIFO_TX_RESET          0x8
+#define FIFO_RX_REG            0x10
+#define FIFO_RX_ADDR_VALID     (1u << 31)
+#define FIFO_RX_RESET          0x18
+
+#define DMA_CHAN_OFFSET                0x1000
+#define DMA_CHAN_SIZE          0x8
+
+#define DMA_WINDOW_SIZE_MASK   0xfffffffc
+
+static struct pci_device_id ids[] = {
+       { PCI_DEVICE(0x15ec, 0x5000), },
+       { 0, }
+};
+MODULE_DEVICE_TABLE(pci, ids);
+
+struct rx_header {
+#define RXHDR_NEXT_ADDR_MASK   0xffffffu
+#define RXHDR_NEXT_VALID       (1u << 31)
+       __le32 next;
+#define RXHDR_NEXT_RECV_FLAG   0x1
+       __le32 recv;
+#define RXHDR_LEN_MASK         0xfffu
+       __le16 len;
+       __le16 port;
+       __le32 reserved;
+       u8 timestamp[8];
+} __packed;
+
+#define PKT_PAYLOAD_SIZE       0x7e8
+struct rx_desc {
+       struct rx_header header;
+       u8 data[PKT_PAYLOAD_SIZE];
+} __packed;
+
+struct tx_header {
+       __le16 len;
+#define TX_HDR_PORT_0          0x1
+#define TX_HDR_PORT_1          0x2
+       u8 port;
+       u8 ts_enable;
+#define TX_HDR_SENT            0x1
+       __le32 sent;
+       u8 timestamp[8];
+} __packed;
+
+struct tx_desc {
+       struct tx_header header;
+       u8 data[PKT_PAYLOAD_SIZE];
+} __packed;
+
+#define FIFO_SIZE              64
+
+static long polling_frequency = TIMER_INTERVAL_NSEC;
+
+struct bhf_dma {
+       u8 *buf;
+       size_t len;
+       dma_addr_t buf_phys;
+
+       u8 *alloc;
+       size_t alloc_len;
+       dma_addr_t alloc_phys;
+};
+
+struct ec_bhf_priv {
+       struct net_device *net_dev;
+
+       struct pci_dev *dev;
+
+       void __iomem *io;
+       void __iomem *dma_io;
+
+       struct hrtimer hrtimer;
+
+       int tx_dma_chan;
+       int rx_dma_chan;
+       void __iomem *ec_io;
+       void __iomem *fifo_io;
+       void __iomem *mii_io;
+       void __iomem *mac_io;
+
+       struct bhf_dma rx_buf;
+       struct rx_desc *rx_descs;
+       int rx_dnext;
+       int rx_dcount;
+
+       struct bhf_dma tx_buf;
+       struct tx_desc *tx_descs;
+       int tx_dcount;
+       int tx_dnext;
+
+       u64 stat_rx_bytes;
+       u64 stat_tx_bytes;
+};
+
+#define PRIV_TO_DEV(priv) (&(priv)->dev->dev)
+
+#define ETHERCAT_MASTER_ID     0x14
+
+static void ec_bhf_print_status(struct ec_bhf_priv *priv)
+{
+       struct device *dev = PRIV_TO_DEV(priv);
+
+       dev_dbg(dev, "Frame error counter: %d\n",
+               ioread8(priv->mac_io + MAC_FRAME_ERR_CNT));
+       dev_dbg(dev, "RX error counter: %d\n",
+               ioread8(priv->mac_io + MAC_RX_ERR_CNT));
+       dev_dbg(dev, "CRC error counter: %d\n",
+               ioread8(priv->mac_io + MAC_CRC_ERR_CNT));
+       dev_dbg(dev, "TX frame counter: %d\n",
+               ioread32(priv->mac_io + MAC_TX_FRAME_CNT));
+       dev_dbg(dev, "RX frame counter: %d\n",
+               ioread32(priv->mac_io + MAC_RX_FRAME_CNT));
+       dev_dbg(dev, "TX fifo level: %d\n",
+               ioread8(priv->mac_io + MAC_TX_FIFO_LVL));
+       dev_dbg(dev, "Dropped frames: %d\n",
+               ioread8(priv->mac_io + MAC_DROPPED_FRMS));
+       dev_dbg(dev, "Connected with CCAT slot: %d\n",
+               ioread8(priv->mac_io + MAC_CONNECTED_CCAT_FLAG));
+       dev_dbg(dev, "Link status: %d\n",
+               ioread8(priv->mii_io + MII_LINK_STATUS));
+}
+
+static void ec_bhf_reset(struct ec_bhf_priv *priv)
+{
+       iowrite8(0, priv->mac_io + MAC_FRAME_ERR_CNT);
+       iowrite8(0, priv->mac_io + MAC_RX_ERR_CNT);
+       iowrite8(0, priv->mac_io + MAC_CRC_ERR_CNT);
+       iowrite8(0, priv->mac_io + MAC_LNK_LST_ERR_CNT);
+       iowrite32(0, priv->mac_io + MAC_TX_FRAME_CNT);
+       iowrite32(0, priv->mac_io + MAC_RX_FRAME_CNT);
+       iowrite8(0, priv->mac_io + MAC_DROPPED_FRMS);
+
+       iowrite8(0, priv->fifo_io + FIFO_TX_RESET);
+       iowrite8(0, priv->fifo_io + FIFO_RX_RESET);
+
+       iowrite8(0, priv->mac_io + MAC_TX_FIFO_LVL);
+}
+
+static void ec_bhf_send_packet(struct ec_bhf_priv *priv, struct tx_desc *desc)
+{
+       u32 len = le16_to_cpu(desc->header.len) + sizeof(desc->header);
+       u32 addr = (u8 *)desc - priv->tx_buf.buf;
+
+       iowrite32((ALIGN(len, 8) << 24) | addr, priv->fifo_io + FIFO_TX_REG);
+
+       dev_dbg(PRIV_TO_DEV(priv), "Done sending packet\n");
+}
+
+static int ec_bhf_desc_sent(struct tx_desc *desc)
+{
+       return le32_to_cpu(desc->header.sent) & TX_HDR_SENT;
+}
+
+static void ec_bhf_process_tx(struct ec_bhf_priv *priv)
+{
+       if (unlikely(netif_queue_stopped(priv->net_dev))) {
+               /* Make sure that we perceive changes to tx_dnext. */
+               smp_rmb();
+
+               if (ec_bhf_desc_sent(&priv->tx_descs[priv->tx_dnext]))
+                       netif_wake_queue(priv->net_dev);
+       }
+}
+
+static int ec_bhf_pkt_received(struct rx_desc *desc)
+{
+       return le32_to_cpu(desc->header.recv) & RXHDR_NEXT_RECV_FLAG;
+}
+
+static void ec_bhf_add_rx_desc(struct ec_bhf_priv *priv, struct rx_desc *desc)
+{
+       iowrite32(FIFO_RX_ADDR_VALID | ((u8 *)(desc) - priv->rx_buf.buf),
+                 priv->fifo_io + FIFO_RX_REG);
+}
+
+static void ec_bhf_process_rx(struct ec_bhf_priv *priv)
+{
+       struct rx_desc *desc = &priv->rx_descs[priv->rx_dnext];
+       struct device *dev = PRIV_TO_DEV(priv);
+
+       while (ec_bhf_pkt_received(desc)) {
+               int pkt_size = (le16_to_cpu(desc->header.len) &
+                              RXHDR_LEN_MASK) - sizeof(struct rx_header) - 4;
+               u8 *data = desc->data;
+               struct sk_buff *skb;
+
+               skb = netdev_alloc_skb_ip_align(priv->net_dev, pkt_size);
+               dev_dbg(dev, "Received packet, size: %d\n", pkt_size);
+
+               if (skb) {
+                       memcpy(skb_put(skb, pkt_size), data, pkt_size);
+                       skb->protocol = eth_type_trans(skb, priv->net_dev);
+                       dev_dbg(dev, "Protocol type: %x\n", skb->protocol);
+
+                       priv->stat_rx_bytes += pkt_size;
+
+                       netif_rx(skb);
+               } else {
+                       dev_err_ratelimited(dev,
+                               "Couldn't allocate a skb_buff for a packet of size %u\n",
+                               pkt_size);
+               }
+
+               desc->header.recv = 0;
+
+               ec_bhf_add_rx_desc(priv, desc);
+
+               priv->rx_dnext = (priv->rx_dnext + 1) % priv->rx_dcount;
+               desc = &priv->rx_descs[priv->rx_dnext];
+       }
+
+}
+
+static enum hrtimer_restart ec_bhf_timer_fun(struct hrtimer *timer)
+{
+       struct ec_bhf_priv *priv = container_of(timer, struct ec_bhf_priv,
+                                               hrtimer);
+       ec_bhf_process_rx(priv);
+       ec_bhf_process_tx(priv);
+
+       if (!netif_running(priv->net_dev))
+               return HRTIMER_NORESTART;
+
+       hrtimer_forward_now(timer, ktime_set(0, polling_frequency));
+       return HRTIMER_RESTART;
+}
+
+static int ec_bhf_setup_offsets(struct ec_bhf_priv *priv)
+{
+       struct device *dev = PRIV_TO_DEV(priv);
+       unsigned block_count, i;
+       void __iomem *ec_info;
+
+       dev_dbg(dev, "Info block:\n");
+       dev_dbg(dev, "Type of function: %x\n", (unsigned)ioread16(priv->io));
+       dev_dbg(dev, "Revision of function: %x\n",
+               (unsigned)ioread16(priv->io + INFO_BLOCK_REV));
+
+       block_count = ioread8(priv->io + INFO_BLOCK_BLK_CNT);
+       dev_dbg(dev, "Number of function blocks: %x\n", block_count);
+
+       for (i = 0; i < block_count; i++) {
+               u16 type = ioread16(priv->io + i * INFO_BLOCK_SIZE +
+                                   INFO_BLOCK_TYPE);
+               if (type == ETHERCAT_MASTER_ID)
+                       break;
+       }
+       if (i == block_count) {
+               dev_err(dev, "EtherCAT master with DMA block not found\n");
+               return -ENODEV;
+       }
+       dev_dbg(dev, "EtherCAT master with DMA block found at pos: %d\n", i);
+
+       ec_info = priv->io + i * INFO_BLOCK_SIZE;
+       dev_dbg(dev, "EtherCAT master revision: %d\n",
+               ioread16(ec_info + INFO_BLOCK_REV));
+
+       priv->tx_dma_chan = ioread8(ec_info + INFO_BLOCK_TX_CHAN);
+       dev_dbg(dev, "EtherCAT master tx dma channel: %d\n",
+               priv->tx_dma_chan);
+
+       priv->rx_dma_chan = ioread8(ec_info + INFO_BLOCK_RX_CHAN);
+       dev_dbg(dev, "EtherCAT master rx dma channel: %d\n",
+                priv->rx_dma_chan);
+
+       priv->ec_io = priv->io + ioread32(ec_info + INFO_BLOCK_OFFSET);
+       priv->mii_io = priv->ec_io + ioread32(priv->ec_io + EC_MII_OFFSET);
+       priv->fifo_io = priv->ec_io + ioread32(priv->ec_io + EC_FIFO_OFFSET);
+       priv->mac_io = priv->ec_io + ioread32(priv->ec_io + EC_MAC_OFFSET);
+
+       dev_dbg(dev,
+               "EtherCAT block addres: %p, fifo address: %p, mii address: %p, mac address: %p\n",
+               priv->ec_io, priv->fifo_io, priv->mii_io, priv->mac_io);
+
+       return 0;
+}
+
+static netdev_tx_t ec_bhf_start_xmit(struct sk_buff *skb,
+                                    struct net_device *net_dev)
+{
+       struct ec_bhf_priv *priv = netdev_priv(net_dev);
+       struct tx_desc *desc;
+       unsigned len;
+
+       dev_dbg(PRIV_TO_DEV(priv), "Starting xmit\n");
+
+       desc = &priv->tx_descs[priv->tx_dnext];
+
+       skb_copy_and_csum_dev(skb, desc->data);
+       len = skb->len;
+
+       memset(&desc->header, 0, sizeof(desc->header));
+       desc->header.len = cpu_to_le16(len);
+       desc->header.port = TX_HDR_PORT_0;
+
+       ec_bhf_send_packet(priv, desc);
+
+       priv->tx_dnext = (priv->tx_dnext + 1) % priv->tx_dcount;
+
+       if (!ec_bhf_desc_sent(&priv->tx_descs[priv->tx_dnext])) {
+               /* Make sure that update updates to tx_dnext are perceived
+                * by timer routine.
+                */
+               smp_wmb();
+
+               netif_stop_queue(net_dev);
+
+               dev_dbg(PRIV_TO_DEV(priv), "Stopping netif queue\n");
+               ec_bhf_print_status(priv);
+       }
+
+       priv->stat_tx_bytes += len;
+
+       dev_kfree_skb(skb);
+
+       return NETDEV_TX_OK;
+}
+
+static int ec_bhf_alloc_dma_mem(struct ec_bhf_priv *priv,
+                               struct bhf_dma *buf,
+                               int channel,
+                               int size)
+{
+       int offset = channel * DMA_CHAN_SIZE + DMA_CHAN_OFFSET;
+       struct device *dev = PRIV_TO_DEV(priv);
+       u32 mask;
+
+       iowrite32(0xffffffff, priv->dma_io + offset);
+
+       mask = ioread32(priv->dma_io + offset);
+       mask &= DMA_WINDOW_SIZE_MASK;
+       dev_dbg(dev, "Read mask %x for channel %d\n", mask, channel);
+
+       /* We want to allocate a chunk of memory that is:
+        * - aligned to the mask we just read
+        * - is of size 2^mask bytes (at most)
+        * In order to ensure that we will allocate buffer of
+        * 2 * 2^mask bytes.
+        */
+       buf->len = min_t(int, ~mask + 1, size);
+       buf->alloc_len = 2 * buf->len;
+
+       dev_dbg(dev, "Allocating %d bytes for channel %d",
+               (int)buf->alloc_len, channel);
+       buf->alloc = dma_alloc_coherent(dev, buf->alloc_len, &buf->alloc_phys,
+                                       GFP_KERNEL);
+       if (buf->alloc == NULL) {
+               dev_info(dev, "Failed to allocate buffer\n");
+               return -ENOMEM;
+       }
+
+       buf->buf_phys = (buf->alloc_phys + buf->len) & mask;
+       buf->buf = buf->alloc + (buf->buf_phys - buf->alloc_phys);
+
+       iowrite32(0, priv->dma_io + offset + 4);
+       iowrite32(buf->buf_phys, priv->dma_io + offset);
+       dev_dbg(dev, "Buffer: %x and read from dev: %x",
+               (unsigned)buf->buf_phys, ioread32(priv->dma_io + offset));
+
+       return 0;
+}
+
+static void ec_bhf_setup_tx_descs(struct ec_bhf_priv *priv)
+{
+       int i = 0;
+
+       priv->tx_dcount = priv->tx_buf.len / sizeof(struct tx_desc);
+       priv->tx_descs = (struct tx_desc *) priv->tx_buf.buf;
+       priv->tx_dnext = 0;
+
+       for (i = 0; i < priv->tx_dcount; i++)
+               priv->tx_descs[i].header.sent = cpu_to_le32(TX_HDR_SENT);
+}
+
+static void ec_bhf_setup_rx_descs(struct ec_bhf_priv *priv)
+{
+       int i;
+
+       priv->rx_dcount = priv->rx_buf.len / sizeof(struct rx_desc);
+       priv->rx_descs = (struct rx_desc *) priv->rx_buf.buf;
+       priv->rx_dnext = 0;
+
+       for (i = 0; i < priv->rx_dcount; i++) {
+               struct rx_desc *desc = &priv->rx_descs[i];
+               u32 next;
+
+               if (i != priv->rx_dcount - 1)
+                       next = (u8 *)(desc + 1) - priv->rx_buf.buf;
+               else
+                       next = 0;
+               next |= RXHDR_NEXT_VALID;
+               desc->header.next = cpu_to_le32(next);
+               desc->header.recv = 0;
+               ec_bhf_add_rx_desc(priv, desc);
+       }
+}
+
+static int ec_bhf_open(struct net_device *net_dev)
+{
+       struct ec_bhf_priv *priv = netdev_priv(net_dev);
+       struct device *dev = PRIV_TO_DEV(priv);
+       int err = 0;
+
+       dev_info(dev, "Opening device\n");
+
+       ec_bhf_reset(priv);
+
+       err = ec_bhf_alloc_dma_mem(priv, &priv->rx_buf, priv->rx_dma_chan,
+                                  FIFO_SIZE * sizeof(struct rx_desc));
+       if (err) {
+               dev_err(dev, "Failed to allocate rx buffer\n");
+               goto out;
+       }
+       ec_bhf_setup_rx_descs(priv);
+
+       dev_info(dev, "RX buffer allocated, address: %x\n",
+                (unsigned)priv->rx_buf.buf_phys);
+
+       err = ec_bhf_alloc_dma_mem(priv, &priv->tx_buf, priv->tx_dma_chan,
+                                  FIFO_SIZE * sizeof(struct tx_desc));
+       if (err) {
+               dev_err(dev, "Failed to allocate tx buffer\n");
+               goto error_rx_free;
+       }
+       dev_dbg(dev, "TX buffer allocated, addres: %x\n",
+               (unsigned)priv->tx_buf.buf_phys);
+
+       iowrite8(0, priv->mii_io + MII_MAC_FILT_FLAG);
+
+       ec_bhf_setup_tx_descs(priv);
+
+       netif_start_queue(net_dev);
+
+       hrtimer_init(&priv->hrtimer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
+       priv->hrtimer.function = ec_bhf_timer_fun;
+       hrtimer_start(&priv->hrtimer, ktime_set(0, polling_frequency),
+                     HRTIMER_MODE_REL);
+
+       dev_info(PRIV_TO_DEV(priv), "Device open\n");
+
+       ec_bhf_print_status(priv);
+
+       return 0;
+
+error_rx_free:
+       dma_free_coherent(dev, priv->rx_buf.alloc_len, priv->rx_buf.alloc,
+                         priv->rx_buf.alloc_len);
+out:
+       return err;
+}
+
+static int ec_bhf_stop(struct net_device *net_dev)
+{
+       struct ec_bhf_priv *priv = netdev_priv(net_dev);
+       struct device *dev = PRIV_TO_DEV(priv);
+
+       hrtimer_cancel(&priv->hrtimer);
+
+       ec_bhf_reset(priv);
+
+       netif_tx_disable(net_dev);
+
+       dma_free_coherent(dev, priv->tx_buf.alloc_len,
+                         priv->tx_buf.alloc, priv->tx_buf.alloc_phys);
+       dma_free_coherent(dev, priv->rx_buf.alloc_len,
+                         priv->rx_buf.alloc, priv->rx_buf.alloc_phys);
+
+       return 0;
+}
+
+static struct rtnl_link_stats64 *
+ec_bhf_get_stats(struct net_device *net_dev,
+                struct rtnl_link_stats64 *stats)
+{
+       struct ec_bhf_priv *priv = netdev_priv(net_dev);
+
+       stats->rx_errors = ioread8(priv->mac_io + MAC_RX_ERR_CNT) +
+                               ioread8(priv->mac_io + MAC_CRC_ERR_CNT) +
+                               ioread8(priv->mac_io + MAC_FRAME_ERR_CNT);
+       stats->rx_packets = ioread32(priv->mac_io + MAC_RX_FRAME_CNT);
+       stats->tx_packets = ioread32(priv->mac_io + MAC_TX_FRAME_CNT);
+       stats->rx_dropped = ioread8(priv->mac_io + MAC_DROPPED_FRMS);
+
+       stats->tx_bytes = priv->stat_tx_bytes;
+       stats->rx_bytes = priv->stat_rx_bytes;
+
+       return stats;
+}
+
+static const struct net_device_ops ec_bhf_netdev_ops = {
+       .ndo_start_xmit         = ec_bhf_start_xmit,
+       .ndo_open               = ec_bhf_open,
+       .ndo_stop               = ec_bhf_stop,
+       .ndo_get_stats64        = ec_bhf_get_stats,
+       .ndo_change_mtu         = eth_change_mtu,
+       .ndo_validate_addr      = eth_validate_addr,
+       .ndo_set_mac_address    = eth_mac_addr
+};
+
+static int ec_bhf_probe(struct pci_dev *dev, const struct pci_device_id *id)
+{
+       struct net_device *net_dev;
+       struct ec_bhf_priv *priv;
+       void __iomem *dma_io;
+       void __iomem *io;
+       int err = 0;
+
+       err = pci_enable_device(dev);
+       if (err)
+               return err;
+
+       pci_set_master(dev);
+
+       err = pci_set_dma_mask(dev, DMA_BIT_MASK(32));
+       if (err) {
+               dev_err(&dev->dev,
+                       "Required dma mask not supported, failed to initialize device\n");
+               err = -EIO;
+               goto err_disable_dev;
+       }
+
+       err = pci_set_consistent_dma_mask(dev, DMA_BIT_MASK(32));
+       if (err) {
+               dev_err(&dev->dev,
+                       "Required dma mask not supported, failed to initialize device\n");
+               goto err_disable_dev;
+       }
+
+       err = pci_request_regions(dev, "ec_bhf");
+       if (err) {
+               dev_err(&dev->dev, "Failed to request pci memory regions\n");
+               goto err_disable_dev;
+       }
+
+       io = pci_iomap(dev, 0, 0);
+       if (!io) {
+               dev_err(&dev->dev, "Failed to map pci card memory bar 0");
+               err = -EIO;
+               goto err_release_regions;
+       }
+
+       dma_io = pci_iomap(dev, 2, 0);
+       if (!dma_io) {
+               dev_err(&dev->dev, "Failed to map pci card memory bar 2");
+               err = -EIO;
+               goto err_unmap;
+       }
+
+       net_dev = alloc_etherdev(sizeof(struct ec_bhf_priv));
+       if (net_dev == NULL) {
+               err = -ENOMEM;
+               goto err_unmap_dma_io;
+       }
+
+       pci_set_drvdata(dev, net_dev);
+       SET_NETDEV_DEV(net_dev, &dev->dev);
+
+       net_dev->features = 0;
+       net_dev->flags |= IFF_NOARP;
+
+       net_dev->netdev_ops = &ec_bhf_netdev_ops;
+
+       priv = netdev_priv(net_dev);
+       priv->net_dev = net_dev;
+       priv->io = io;
+       priv->dma_io = dma_io;
+       priv->dev = dev;
+
+       err = ec_bhf_setup_offsets(priv);
+       if (err < 0)
+               goto err_free_net_dev;
+
+       memcpy_fromio(net_dev->dev_addr, priv->mii_io + MII_MAC_ADDR, 6);
+
+       dev_dbg(&dev->dev, "CX5020 Ethercat master address: %pM\n",
+               net_dev->dev_addr);
+
+       err = register_netdev(net_dev);
+       if (err < 0)
+               goto err_free_net_dev;
+
+       return 0;
+
+err_free_net_dev:
+       free_netdev(net_dev);
+err_unmap_dma_io:
+       pci_iounmap(dev, dma_io);
+err_unmap:
+       pci_iounmap(dev, io);
+err_release_regions:
+       pci_release_regions(dev);
+err_disable_dev:
+       pci_clear_master(dev);
+       pci_disable_device(dev);
+
+       return err;
+}
+
+static void ec_bhf_remove(struct pci_dev *dev)
+{
+       struct net_device *net_dev = pci_get_drvdata(dev);
+       struct ec_bhf_priv *priv = netdev_priv(net_dev);
+
+       unregister_netdev(net_dev);
+       free_netdev(net_dev);
+
+       pci_iounmap(dev, priv->dma_io);
+       pci_iounmap(dev, priv->io);
+       pci_release_regions(dev);
+       pci_clear_master(dev);
+       pci_disable_device(dev);
+}
+
+static struct pci_driver pci_driver = {
+       .name           = "ec_bhf",
+       .id_table       = ids,
+       .probe          = ec_bhf_probe,
+       .remove         = ec_bhf_remove,
+};
+
+static int __init ec_bhf_init(void)
+{
+       return pci_register_driver(&pci_driver);
+}
+
+static void __exit ec_bhf_exit(void)
+{
+       pci_unregister_driver(&pci_driver);
+}
+
+module_init(ec_bhf_init);
+module_exit(ec_bhf_exit);
+
+module_param(polling_frequency, long, S_IRUGO);
+MODULE_PARM_DESC(polling_frequency, "Polling timer frequency in ns");
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Dariusz Marcinkiewicz <reksio@newterm.pl>");
index 97db5a7179df1c6c733bed5e25e5be3d44d39fa1..31c376628bfdac0158f61aef1aef3ee1aaac7e1b 100644 (file)
@@ -120,6 +120,9 @@ static inline char *nic_name(struct pci_dev *pdev)
 #define MAX_VFS                        30 /* Max VFs supported by BE3 FW */
 #define FW_VER_LEN             32
 
+#define        RSS_INDIR_TABLE_LEN     128
+#define RSS_HASH_KEY_LEN       40
+
 struct be_dma_mem {
        void *va;
        dma_addr_t dma;
@@ -409,6 +412,13 @@ struct be_resources {
        u32 if_cap_flags;
 };
 
+struct rss_info {
+       u64 rss_flags;
+       u8 rsstable[RSS_INDIR_TABLE_LEN];
+       u8 rss_queue[RSS_INDIR_TABLE_LEN];
+       u8 rss_hkey[RSS_HASH_KEY_LEN];
+};
+
 struct be_adapter {
        struct pci_dev *pdev;
        struct net_device *netdev;
@@ -445,7 +455,7 @@ struct be_adapter {
        struct be_drv_stats drv_stats;
        struct be_aic_obj aic_obj[MAX_EVT_QS];
        u16 vlans_added;
-       u8 vlan_tag[VLAN_N_VID];
+       unsigned long vids[BITS_TO_LONGS(VLAN_N_VID)];
        u8 vlan_prio_bmap;      /* Available Priority BitMap */
        u16 recommended_prio;   /* Recommended Priority */
        struct be_dma_mem rx_filter; /* Cmd DMA mem for rx-filter */
@@ -507,7 +517,7 @@ struct be_adapter {
        u32 msg_enable;
        int be_get_temp_freq;
        u8 pf_number;
-       u64 rss_flags;
+       struct rss_info rss_info;
 };
 
 #define be_physfn(adapter)             (!adapter->virtfn)
index d1ec15af0d2482f46c425bb7d2f6c99976c16329..476752d0a6a4c84594838424b7cbfc07c4f9bbf1 100644 (file)
@@ -52,8 +52,7 @@ static struct be_cmd_priv_map cmd_priv_map[] = {
        }
 };
 
-static bool be_cmd_allowed(struct be_adapter *adapter, u8 opcode,
-                          u8 subsystem)
+static bool be_cmd_allowed(struct be_adapter *adapter, u8 opcode, u8 subsystem)
 {
        int i;
        int num_entries = sizeof(cmd_priv_map)/sizeof(struct be_cmd_priv_map);
@@ -197,7 +196,7 @@ done:
 
 /* Link state evt is a string of bytes; no need for endian swapping */
 static void be_async_link_state_process(struct be_adapter *adapter,
-               struct be_async_event_link_state *evt)
+                                       struct be_async_event_link_state *evt)
 {
        /* When link status changes, link speed must be re-queried from FW */
        adapter->phy.link_speed = -1;
@@ -221,7 +220,9 @@ static void be_async_link_state_process(struct be_adapter *adapter,
 
 /* Grp5 CoS Priority evt */
 static void be_async_grp5_cos_priority_process(struct be_adapter *adapter,
-               struct be_async_event_grp5_cos_priority *evt)
+                                              struct
+                                              be_async_event_grp5_cos_priority
+                                              *evt)
 {
        if (evt->valid) {
                adapter->vlan_prio_bmap = evt->available_priority_bmap;
@@ -233,7 +234,9 @@ static void be_async_grp5_cos_priority_process(struct be_adapter *adapter,
 
 /* Grp5 QOS Speed evt: qos_link_speed is in units of 10 Mbps */
 static void be_async_grp5_qos_speed_process(struct be_adapter *adapter,
-               struct be_async_event_grp5_qos_link_speed *evt)
+                                           struct
+                                           be_async_event_grp5_qos_link_speed
+                                           *evt)
 {
        if (adapter->phy.link_speed >= 0 &&
            evt->physical_port == adapter->port_num)
@@ -242,7 +245,9 @@ static void be_async_grp5_qos_speed_process(struct be_adapter *adapter,
 
 /*Grp5 PVID evt*/
 static void be_async_grp5_pvid_state_process(struct be_adapter *adapter,
-               struct be_async_event_grp5_pvid_state *evt)
+                                            struct
+                                            be_async_event_grp5_pvid_state
+                                            *evt)
 {
        if (evt->enabled) {
                adapter->pvid = le16_to_cpu(evt->tag) & VLAN_VID_MASK;
@@ -253,7 +258,7 @@ static void be_async_grp5_pvid_state_process(struct be_adapter *adapter,
 }
 
 static void be_async_grp5_evt_process(struct be_adapter *adapter,
-               u32 trailer, struct be_mcc_compl *evt)
+                                     u32 trailer, struct be_mcc_compl *evt)
 {
        u8 event_type = 0;
 
@@ -281,7 +286,7 @@ static void be_async_grp5_evt_process(struct be_adapter *adapter,
 }
 
 static void be_async_dbg_evt_process(struct be_adapter *adapter,
-               u32 trailer, struct be_mcc_compl *cmp)
+                                    u32 trailer, struct be_mcc_compl *cmp)
 {
        u8 event_type = 0;
        struct be_async_event_qnq *evt = (struct be_async_event_qnq *) cmp;
@@ -370,10 +375,10 @@ int be_process_mcc(struct be_adapter *adapter)
                                (struct be_async_event_link_state *) compl);
                        else if (is_grp5_evt(compl->flags))
                                be_async_grp5_evt_process(adapter,
-                               compl->flags, compl);
+                                                         compl->flags, compl);
                        else if (is_dbg_evt(compl->flags))
                                be_async_dbg_evt_process(adapter,
-                               compl->flags, compl);
+                                                        compl->flags, compl);
                } else if (compl->flags & CQE_FLAGS_COMPLETED_MASK) {
                                status = be_mcc_compl_process(adapter, compl);
                                atomic_dec(&mcc_obj->q.used);
@@ -560,10 +565,8 @@ static bool lancer_provisioning_error(struct be_adapter *adapter)
        u32 sliport_status = 0, sliport_err1 = 0, sliport_err2 = 0;
        sliport_status = ioread32(adapter->db + SLIPORT_STATUS_OFFSET);
        if (sliport_status & SLIPORT_STATUS_ERR_MASK) {
-               sliport_err1 = ioread32(adapter->db +
-                                       SLIPORT_ERROR1_OFFSET);
-               sliport_err2 = ioread32(adapter->db +
-                                       SLIPORT_ERROR2_OFFSET);
+               sliport_err1 = ioread32(adapter->db + SLIPORT_ERROR1_OFFSET);
+               sliport_err2 = ioread32(adapter->db + SLIPORT_ERROR2_OFFSET);
 
                if (sliport_err1 == SLIPORT_ERROR_NO_RESOURCE1 &&
                    sliport_err2 == SLIPORT_ERROR_NO_RESOURCE2)
@@ -630,8 +633,7 @@ int be_fw_wait_ready(struct be_adapter *adapter)
                if (stage == POST_STAGE_ARMFW_RDY)
                        return 0;
 
-               dev_info(dev, "Waiting for POST, %ds elapsed\n",
-                        timeout);
+               dev_info(dev, "Waiting for POST, %ds elapsed\n", timeout);
                if (msleep_interruptible(2000)) {
                        dev_err(dev, "Waiting for POST aborted\n");
                        return -EINTR;
@@ -649,8 +651,7 @@ static inline struct be_sge *nonembedded_sgl(struct be_mcc_wrb *wrb)
        return &wrb->payload.sgl[0];
 }
 
-static inline void fill_wrb_tags(struct be_mcc_wrb *wrb,
-                                unsigned long addr)
+static inline void fill_wrb_tags(struct be_mcc_wrb *wrb, unsigned long addr)
 {
        wrb->tag0 = addr & 0xFFFFFFFF;
        wrb->tag1 = upper_32_bits(addr);
@@ -659,8 +660,9 @@ static inline void fill_wrb_tags(struct be_mcc_wrb *wrb,
 /* Don't touch the hdr after it's prepared */
 /* mem will be NULL for embedded commands */
 static void be_wrb_cmd_hdr_prepare(struct be_cmd_req_hdr *req_hdr,
-                               u8 subsystem, u8 opcode, int cmd_len,
-                               struct be_mcc_wrb *wrb, struct be_dma_mem *mem)
+                                  u8 subsystem, u8 opcode, int cmd_len,
+                                  struct be_mcc_wrb *wrb,
+                                  struct be_dma_mem *mem)
 {
        struct be_sge *sge;
 
@@ -683,7 +685,7 @@ static void be_wrb_cmd_hdr_prepare(struct be_cmd_req_hdr *req_hdr,
 }
 
 static void be_cmd_page_addrs_prepare(struct phys_addr *pages, u32 max_pages,
-                       struct be_dma_mem *mem)
+                                     struct be_dma_mem *mem)
 {
        int i, buf_pages = min(PAGES_4K_SPANNED(mem->va, mem->size), max_pages);
        u64 dma = (u64)mem->dma;
@@ -868,7 +870,8 @@ int be_cmd_eq_create(struct be_adapter *adapter, struct be_eq_obj *eqo)
        req = embedded_payload(wrb);
 
        be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
-               OPCODE_COMMON_EQ_CREATE, sizeof(*req), wrb, NULL);
+                              OPCODE_COMMON_EQ_CREATE, sizeof(*req), wrb,
+                              NULL);
 
        /* Support for EQ_CREATEv2 available only SH-R onwards */
        if (!(BEx_chip(adapter) || lancer_chip(adapter)))
@@ -917,7 +920,8 @@ int be_cmd_mac_addr_query(struct be_adapter *adapter, u8 *mac_addr,
        req = embedded_payload(wrb);
 
        be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
-               OPCODE_COMMON_NTWK_MAC_QUERY, sizeof(*req), wrb, NULL);
+                              OPCODE_COMMON_NTWK_MAC_QUERY, sizeof(*req), wrb,
+                              NULL);
        req->type = MAC_ADDRESS_TYPE_NETWORK;
        if (permanent) {
                req->permanent = 1;
@@ -940,7 +944,7 @@ err:
 
 /* Uses synchronous MCCQ */
 int be_cmd_pmac_add(struct be_adapter *adapter, u8 *mac_addr,
-               u32 if_id, u32 *pmac_id, u32 domain)
+                   u32 if_id, u32 *pmac_id, u32 domain)
 {
        struct be_mcc_wrb *wrb;
        struct be_cmd_req_pmac_add *req;
@@ -956,7 +960,8 @@ int be_cmd_pmac_add(struct be_adapter *adapter, u8 *mac_addr,
        req = embedded_payload(wrb);
 
        be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
-               OPCODE_COMMON_NTWK_PMAC_ADD, sizeof(*req), wrb, NULL);
+                              OPCODE_COMMON_NTWK_PMAC_ADD, sizeof(*req), wrb,
+                              NULL);
 
        req->hdr.domain = domain;
        req->if_id = cpu_to_le32(if_id);
@@ -1012,7 +1017,7 @@ err:
 
 /* Uses Mbox */
 int be_cmd_cq_create(struct be_adapter *adapter, struct be_queue_info *cq,
-               struct be_queue_info *eq, bool no_delay, int coalesce_wm)
+                    struct be_queue_info *eq, bool no_delay, int coalesce_wm)
 {
        struct be_mcc_wrb *wrb;
        struct be_cmd_req_cq_create *req;
@@ -1028,17 +1033,18 @@ int be_cmd_cq_create(struct be_adapter *adapter, struct be_queue_info *cq,
        ctxt = &req->context;
 
        be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
-               OPCODE_COMMON_CQ_CREATE, sizeof(*req), wrb, NULL);
+                              OPCODE_COMMON_CQ_CREATE, sizeof(*req), wrb,
+                              NULL);
 
        req->num_pages =  cpu_to_le16(PAGES_4K_SPANNED(q_mem->va, q_mem->size));
 
        if (BEx_chip(adapter)) {
                AMAP_SET_BITS(struct amap_cq_context_be, coalescwm, ctxt,
-                                                               coalesce_wm);
+                             coalesce_wm);
                AMAP_SET_BITS(struct amap_cq_context_be, nodelay,
-                                                               ctxt, no_delay);
+                             ctxt, no_delay);
                AMAP_SET_BITS(struct amap_cq_context_be, count, ctxt,
-                                               __ilog2_u32(cq->len/256));
+                             __ilog2_u32(cq->len / 256));
                AMAP_SET_BITS(struct amap_cq_context_be, valid, ctxt, 1);
                AMAP_SET_BITS(struct amap_cq_context_be, eventable, ctxt, 1);
                AMAP_SET_BITS(struct amap_cq_context_be, eqid, ctxt, eq->id);
@@ -1053,14 +1059,12 @@ int be_cmd_cq_create(struct be_adapter *adapter, struct be_queue_info *cq,
                        AMAP_SET_BITS(struct amap_cq_context_v2, coalescwm,
                                      ctxt, coalesce_wm);
                AMAP_SET_BITS(struct amap_cq_context_v2, nodelay, ctxt,
-                                                               no_delay);
+                             no_delay);
                AMAP_SET_BITS(struct amap_cq_context_v2, count, ctxt,
-                                               __ilog2_u32(cq->len/256));
+                             __ilog2_u32(cq->len / 256));
                AMAP_SET_BITS(struct amap_cq_context_v2, valid, ctxt, 1);
-               AMAP_SET_BITS(struct amap_cq_context_v2, eventable,
-                                                               ctxt, 1);
-               AMAP_SET_BITS(struct amap_cq_context_v2, eqid,
-                                                               ctxt, eq->id);
+               AMAP_SET_BITS(struct amap_cq_context_v2, eventable, ctxt, 1);
+               AMAP_SET_BITS(struct amap_cq_context_v2, eqid, ctxt, eq->id);
        }
 
        be_dws_cpu_to_le(ctxt, sizeof(req->context));
@@ -1088,8 +1092,8 @@ static u32 be_encoded_q_len(int q_len)
 }
 
 static int be_cmd_mccq_ext_create(struct be_adapter *adapter,
-                               struct be_queue_info *mccq,
-                               struct be_queue_info *cq)
+                                 struct be_queue_info *mccq,
+                                 struct be_queue_info *cq)
 {
        struct be_mcc_wrb *wrb;
        struct be_cmd_req_mcc_ext_create *req;
@@ -1105,13 +1109,14 @@ static int be_cmd_mccq_ext_create(struct be_adapter *adapter,
        ctxt = &req->context;
 
        be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
-                       OPCODE_COMMON_MCC_CREATE_EXT, sizeof(*req), wrb, NULL);
+                              OPCODE_COMMON_MCC_CREATE_EXT, sizeof(*req), wrb,
+                              NULL);
 
        req->num_pages = cpu_to_le16(PAGES_4K_SPANNED(q_mem->va, q_mem->size));
        if (BEx_chip(adapter)) {
                AMAP_SET_BITS(struct amap_mcc_context_be, valid, ctxt, 1);
                AMAP_SET_BITS(struct amap_mcc_context_be, ring_size, ctxt,
-                                               be_encoded_q_len(mccq->len));
+                             be_encoded_q_len(mccq->len));
                AMAP_SET_BITS(struct amap_mcc_context_be, cq_id, ctxt, cq->id);
        } else {
                req->hdr.version = 1;
@@ -1145,8 +1150,8 @@ static int be_cmd_mccq_ext_create(struct be_adapter *adapter,
 }
 
 static int be_cmd_mccq_org_create(struct be_adapter *adapter,
-                               struct be_queue_info *mccq,
-                               struct be_queue_info *cq)
+                                 struct be_queue_info *mccq,
+                                 struct be_queue_info *cq)
 {
        struct be_mcc_wrb *wrb;
        struct be_cmd_req_mcc_create *req;
@@ -1162,13 +1167,14 @@ static int be_cmd_mccq_org_create(struct be_adapter *adapter,
        ctxt = &req->context;
 
        be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
-                       OPCODE_COMMON_MCC_CREATE, sizeof(*req), wrb, NULL);
+                              OPCODE_COMMON_MCC_CREATE, sizeof(*req), wrb,
+                              NULL);
 
        req->num_pages = cpu_to_le16(PAGES_4K_SPANNED(q_mem->va, q_mem->size));
 
        AMAP_SET_BITS(struct amap_mcc_context_be, valid, ctxt, 1);
        AMAP_SET_BITS(struct amap_mcc_context_be, ring_size, ctxt,
-                       be_encoded_q_len(mccq->len));
+                     be_encoded_q_len(mccq->len));
        AMAP_SET_BITS(struct amap_mcc_context_be, cq_id, ctxt, cq->id);
 
        be_dws_cpu_to_le(ctxt, sizeof(req->context));
@@ -1187,8 +1193,7 @@ static int be_cmd_mccq_org_create(struct be_adapter *adapter,
 }
 
 int be_cmd_mccq_create(struct be_adapter *adapter,
-                       struct be_queue_info *mccq,
-                       struct be_queue_info *cq)
+                      struct be_queue_info *mccq, struct be_queue_info *cq)
 {
        int status;
 
@@ -1213,7 +1218,7 @@ int be_cmd_txq_create(struct be_adapter *adapter, struct be_tx_obj *txo)
 
        req = embedded_payload(&wrb);
        be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_ETH,
-                               OPCODE_ETH_TX_CREATE, sizeof(*req), &wrb, NULL);
+                              OPCODE_ETH_TX_CREATE, sizeof(*req), &wrb, NULL);
 
        if (lancer_chip(adapter)) {
                req->hdr.version = 1;
@@ -1250,8 +1255,8 @@ int be_cmd_txq_create(struct be_adapter *adapter, struct be_tx_obj *txo)
 
 /* Uses MCC */
 int be_cmd_rxq_create(struct be_adapter *adapter,
-               struct be_queue_info *rxq, u16 cq_id, u16 frag_size,
-               u32 if_id, u32 rss, u8 *rss_id)
+                     struct be_queue_info *rxq, u16 cq_id, u16 frag_size,
+                     u32 if_id, u32 rss, u8 *rss_id)
 {
        struct be_mcc_wrb *wrb;
        struct be_cmd_req_eth_rx_create *req;
@@ -1268,7 +1273,7 @@ int be_cmd_rxq_create(struct be_adapter *adapter,
        req = embedded_payload(wrb);
 
        be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_ETH,
-                               OPCODE_ETH_RX_CREATE, sizeof(*req), wrb, NULL);
+                              OPCODE_ETH_RX_CREATE, sizeof(*req), wrb, NULL);
 
        req->cq_id = cpu_to_le16(cq_id);
        req->frag_size = fls(frag_size) - 1;
@@ -1295,7 +1300,7 @@ err:
  * Uses Mbox
  */
 int be_cmd_q_destroy(struct be_adapter *adapter, struct be_queue_info *q,
-               int queue_type)
+                    int queue_type)
 {
        struct be_mcc_wrb *wrb;
        struct be_cmd_req_q_destroy *req;
@@ -1334,7 +1339,7 @@ int be_cmd_q_destroy(struct be_adapter *adapter, struct be_queue_info *q,
        }
 
        be_wrb_cmd_hdr_prepare(&req->hdr, subsys, opcode, sizeof(*req), wrb,
-                               NULL);
+                              NULL);
        req->id = cpu_to_le16(q->id);
 
        status = be_mbox_notify_wait(adapter);
@@ -1361,7 +1366,7 @@ int be_cmd_rxq_destroy(struct be_adapter *adapter, struct be_queue_info *q)
        req = embedded_payload(wrb);
 
        be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_ETH,
-                       OPCODE_ETH_RX_DESTROY, sizeof(*req), wrb, NULL);
+                              OPCODE_ETH_RX_DESTROY, sizeof(*req), wrb, NULL);
        req->id = cpu_to_le16(q->id);
 
        status = be_mcc_notify_wait(adapter);
@@ -1384,7 +1389,8 @@ int be_cmd_if_create(struct be_adapter *adapter, u32 cap_flags, u32 en_flags,
 
        req = embedded_payload(&wrb);
        be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
-               OPCODE_COMMON_NTWK_INTERFACE_CREATE, sizeof(*req), &wrb, NULL);
+                              OPCODE_COMMON_NTWK_INTERFACE_CREATE,
+                              sizeof(*req), &wrb, NULL);
        req->hdr.domain = domain;
        req->capability_flags = cpu_to_le32(cap_flags);
        req->enable_flags = cpu_to_le32(en_flags);
@@ -1422,7 +1428,8 @@ int be_cmd_if_destroy(struct be_adapter *adapter, int interface_id, u32 domain)
        req = embedded_payload(wrb);
 
        be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
-               OPCODE_COMMON_NTWK_INTERFACE_DESTROY, sizeof(*req), wrb, NULL);
+                              OPCODE_COMMON_NTWK_INTERFACE_DESTROY,
+                              sizeof(*req), wrb, NULL);
        req->hdr.domain = domain;
        req->interface_id = cpu_to_le32(interface_id);
 
@@ -1452,7 +1459,8 @@ int be_cmd_get_stats(struct be_adapter *adapter, struct be_dma_mem *nonemb_cmd)
        hdr = nonemb_cmd->va;
 
        be_wrb_cmd_hdr_prepare(hdr, CMD_SUBSYSTEM_ETH,
-               OPCODE_ETH_GET_STATISTICS, nonemb_cmd->size, wrb, nonemb_cmd);
+                              OPCODE_ETH_GET_STATISTICS, nonemb_cmd->size, wrb,
+                              nonemb_cmd);
 
        /* version 1 of the cmd is not supported only by BE2 */
        if (BE2_chip(adapter))
@@ -1472,7 +1480,7 @@ err:
 
 /* Lancer Stats */
 int lancer_cmd_get_pport_stats(struct be_adapter *adapter,
-                               struct be_dma_mem *nonemb_cmd)
+                              struct be_dma_mem *nonemb_cmd)
 {
 
        struct be_mcc_wrb *wrb;
@@ -1493,8 +1501,8 @@ int lancer_cmd_get_pport_stats(struct be_adapter *adapter,
        req = nonemb_cmd->va;
 
        be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_ETH,
-                       OPCODE_ETH_GET_PPORT_STATS, nonemb_cmd->size, wrb,
-                       nonemb_cmd);
+                              OPCODE_ETH_GET_PPORT_STATS, nonemb_cmd->size,
+                              wrb, nonemb_cmd);
 
        req->cmd_params.params.pport_num = cpu_to_le16(adapter->hba_port_num);
        req->cmd_params.params.reset_stats = 0;
@@ -1553,7 +1561,8 @@ int be_cmd_link_status_query(struct be_adapter *adapter, u16 *link_speed,
        req = embedded_payload(wrb);
 
        be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
-               OPCODE_COMMON_NTWK_LINK_STATUS_QUERY, sizeof(*req), wrb, NULL);
+                              OPCODE_COMMON_NTWK_LINK_STATUS_QUERY,
+                              sizeof(*req), wrb, NULL);
 
        /* version 1 of the cmd is not supported only by BE2 */
        if (!BE2_chip(adapter))
@@ -1598,8 +1607,8 @@ int be_cmd_get_die_temperature(struct be_adapter *adapter)
        req = embedded_payload(wrb);
 
        be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
-               OPCODE_COMMON_GET_CNTL_ADDITIONAL_ATTRIBUTES, sizeof(*req),
-               wrb, NULL);
+                              OPCODE_COMMON_GET_CNTL_ADDITIONAL_ATTRIBUTES,
+                              sizeof(*req), wrb, NULL);
 
        be_mcc_notify(adapter);
 
@@ -1625,7 +1634,8 @@ int be_cmd_get_reg_len(struct be_adapter *adapter, u32 *log_size)
        req = embedded_payload(wrb);
 
        be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
-               OPCODE_COMMON_MANAGE_FAT, sizeof(*req), wrb, NULL);
+                              OPCODE_COMMON_MANAGE_FAT, sizeof(*req), wrb,
+                              NULL);
        req->fat_operation = cpu_to_le32(QUERY_FAT);
        status = be_mcc_notify_wait(adapter);
        if (!status) {
@@ -1655,8 +1665,8 @@ void be_cmd_get_regs(struct be_adapter *adapter, u32 buf_len, void *buf)
 
        get_fat_cmd.size = sizeof(struct be_cmd_req_get_fat) + 60*1024;
        get_fat_cmd.va = pci_alloc_consistent(adapter->pdev,
-                       get_fat_cmd.size,
-                       &get_fat_cmd.dma);
+                                             get_fat_cmd.size,
+                                             &get_fat_cmd.dma);
        if (!get_fat_cmd.va) {
                status = -ENOMEM;
                dev_err(&adapter->pdev->dev,
@@ -1679,8 +1689,8 @@ void be_cmd_get_regs(struct be_adapter *adapter, u32 buf_len, void *buf)
 
                payload_len = sizeof(struct be_cmd_req_get_fat) + buf_size;
                be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
-                               OPCODE_COMMON_MANAGE_FAT, payload_len, wrb,
-                               &get_fat_cmd);
+                                      OPCODE_COMMON_MANAGE_FAT, payload_len,
+                                      wrb, &get_fat_cmd);
 
                req->fat_operation = cpu_to_le32(RETRIEVE_FAT);
                req->read_log_offset = cpu_to_le32(log_offset);
@@ -1691,8 +1701,8 @@ void be_cmd_get_regs(struct be_adapter *adapter, u32 buf_len, void *buf)
                if (!status) {
                        struct be_cmd_resp_get_fat *resp = get_fat_cmd.va;
                        memcpy(buf + offset,
-                               resp->data_buffer,
-                               le32_to_cpu(resp->read_log_length));
+                              resp->data_buffer,
+                              le32_to_cpu(resp->read_log_length));
                } else {
                        dev_err(&adapter->pdev->dev, "FAT Table Retrieve error\n");
                        goto err;
@@ -1702,14 +1712,13 @@ void be_cmd_get_regs(struct be_adapter *adapter, u32 buf_len, void *buf)
        }
 err:
        pci_free_consistent(adapter->pdev, get_fat_cmd.size,
-                       get_fat_cmd.va,
-                       get_fat_cmd.dma);
+                           get_fat_cmd.va, get_fat_cmd.dma);
        spin_unlock_bh(&adapter->mcc_lock);
 }
 
 /* Uses synchronous mcc */
 int be_cmd_get_fw_ver(struct be_adapter *adapter, char *fw_ver,
-                       char *fw_on_flash)
+                     char *fw_on_flash)
 {
        struct be_mcc_wrb *wrb;
        struct be_cmd_req_get_fw_version *req;
@@ -1726,7 +1735,8 @@ int be_cmd_get_fw_ver(struct be_adapter *adapter, char *fw_ver,
        req = embedded_payload(wrb);
 
        be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
-               OPCODE_COMMON_GET_FW_VERSION, sizeof(*req), wrb, NULL);
+                              OPCODE_COMMON_GET_FW_VERSION, sizeof(*req), wrb,
+                              NULL);
        status = be_mcc_notify_wait(adapter);
        if (!status) {
                struct be_cmd_resp_get_fw_version *resp = embedded_payload(wrb);
@@ -1759,7 +1769,8 @@ int be_cmd_modify_eqd(struct be_adapter *adapter, struct be_set_eqd *set_eqd,
        req = embedded_payload(wrb);
 
        be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
-               OPCODE_COMMON_MODIFY_EQ_DELAY, sizeof(*req), wrb, NULL);
+                              OPCODE_COMMON_MODIFY_EQ_DELAY, sizeof(*req), wrb,
+                              NULL);
 
        req->num_eq = cpu_to_le32(num);
        for (i = 0; i < num; i++) {
@@ -1777,7 +1788,7 @@ err:
 
 /* Uses sycnhronous mcc */
 int be_cmd_vlan_config(struct be_adapter *adapter, u32 if_id, u16 *vtag_array,
-                      u32 num, bool promiscuous)
+                      u32 num)
 {
        struct be_mcc_wrb *wrb;
        struct be_cmd_req_vlan_config *req;
@@ -1793,19 +1804,16 @@ int be_cmd_vlan_config(struct be_adapter *adapter, u32 if_id, u16 *vtag_array,
        req = embedded_payload(wrb);
 
        be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
-               OPCODE_COMMON_NTWK_VLAN_CONFIG, sizeof(*req), wrb, NULL);
+                              OPCODE_COMMON_NTWK_VLAN_CONFIG, sizeof(*req),
+                              wrb, NULL);
 
        req->interface_id = if_id;
-       req->promiscuous = promiscuous;
        req->untagged = BE_IF_FLAGS_UNTAGGED & be_if_cap_flags(adapter) ? 1 : 0;
        req->num_vlan = num;
-       if (!promiscuous) {
-               memcpy(req->normal_vlan, vtag_array,
-                       req->num_vlan * sizeof(vtag_array[0]));
-       }
+       memcpy(req->normal_vlan, vtag_array,
+              req->num_vlan * sizeof(vtag_array[0]));
 
        status = be_mcc_notify_wait(adapter);
-
 err:
        spin_unlock_bh(&adapter->mcc_lock);
        return status;
@@ -1827,18 +1835,19 @@ int be_cmd_rx_filter(struct be_adapter *adapter, u32 flags, u32 value)
        }
        memset(req, 0, sizeof(*req));
        be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
-                               OPCODE_COMMON_NTWK_RX_FILTER, sizeof(*req),
-                               wrb, mem);
+                              OPCODE_COMMON_NTWK_RX_FILTER, sizeof(*req),
+                              wrb, mem);
 
        req->if_id = cpu_to_le32(adapter->if_handle);
        if (flags & IFF_PROMISC) {
                req->if_flags_mask = cpu_to_le32(BE_IF_FLAGS_PROMISCUOUS |
-                                       BE_IF_FLAGS_VLAN_PROMISCUOUS |
-                                       BE_IF_FLAGS_MCAST_PROMISCUOUS);
+                                                BE_IF_FLAGS_VLAN_PROMISCUOUS |
+                                                BE_IF_FLAGS_MCAST_PROMISCUOUS);
                if (value == ON)
-                       req->if_flags = cpu_to_le32(BE_IF_FLAGS_PROMISCUOUS |
-                                               BE_IF_FLAGS_VLAN_PROMISCUOUS |
-                                               BE_IF_FLAGS_MCAST_PROMISCUOUS);
+                       req->if_flags =
+                               cpu_to_le32(BE_IF_FLAGS_PROMISCUOUS |
+                                           BE_IF_FLAGS_VLAN_PROMISCUOUS |
+                                           BE_IF_FLAGS_MCAST_PROMISCUOUS);
        } else if (flags & IFF_ALLMULTI) {
                req->if_flags_mask = req->if_flags =
                                cpu_to_le32(BE_IF_FLAGS_MCAST_PROMISCUOUS);
@@ -1867,7 +1876,7 @@ int be_cmd_rx_filter(struct be_adapter *adapter, u32 flags, u32 value)
        }
 
        if ((req->if_flags_mask & cpu_to_le32(be_if_cap_flags(adapter))) !=
-            req->if_flags_mask) {
+           req->if_flags_mask) {
                dev_warn(&adapter->pdev->dev,
                         "Cannot set rx filter flags 0x%x\n",
                         req->if_flags_mask);
@@ -1905,7 +1914,8 @@ int be_cmd_set_flow_control(struct be_adapter *adapter, u32 tx_fc, u32 rx_fc)
        req = embedded_payload(wrb);
 
        be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
-               OPCODE_COMMON_SET_FLOW_CONTROL, sizeof(*req), wrb, NULL);
+                              OPCODE_COMMON_SET_FLOW_CONTROL, sizeof(*req),
+                              wrb, NULL);
 
        req->tx_flow_control = cpu_to_le16((u16)tx_fc);
        req->rx_flow_control = cpu_to_le16((u16)rx_fc);
@@ -1938,7 +1948,8 @@ int be_cmd_get_flow_control(struct be_adapter *adapter, u32 *tx_fc, u32 *rx_fc)
        req = embedded_payload(wrb);
 
        be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
-               OPCODE_COMMON_GET_FLOW_CONTROL, sizeof(*req), wrb, NULL);
+                              OPCODE_COMMON_GET_FLOW_CONTROL, sizeof(*req),
+                              wrb, NULL);
 
        status = be_mcc_notify_wait(adapter);
        if (!status) {
@@ -1968,7 +1979,8 @@ int be_cmd_query_fw_cfg(struct be_adapter *adapter, u32 *port_num,
        req = embedded_payload(wrb);
 
        be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
-               OPCODE_COMMON_QUERY_FIRMWARE_CONFIG, sizeof(*req), wrb, NULL);
+                              OPCODE_COMMON_QUERY_FIRMWARE_CONFIG,
+                              sizeof(*req), wrb, NULL);
 
        status = be_mbox_notify_wait(adapter);
        if (!status) {
@@ -2011,7 +2023,8 @@ int be_cmd_reset_function(struct be_adapter *adapter)
        req = embedded_payload(wrb);
 
        be_wrb_cmd_hdr_prepare(req, CMD_SUBSYSTEM_COMMON,
-               OPCODE_COMMON_FUNCTION_RESET, sizeof(*req), wrb, NULL);
+                              OPCODE_COMMON_FUNCTION_RESET, sizeof(*req), wrb,
+                              NULL);
 
        status = be_mbox_notify_wait(adapter);
 
@@ -2020,47 +2033,47 @@ int be_cmd_reset_function(struct be_adapter *adapter)
 }
 
 int be_cmd_rss_config(struct be_adapter *adapter, u8 *rsstable,
-                       u32 rss_hash_opts, u16 table_size)
+                     u32 rss_hash_opts, u16 table_size, u8 *rss_hkey)
 {
        struct be_mcc_wrb *wrb;
        struct be_cmd_req_rss_config *req;
-       u32 myhash[10] = {0x15d43fa5, 0x2534685a, 0x5f87693a, 0x5668494e,
-                       0x33cf6a53, 0x383334c6, 0x76ac4257, 0x59b242b2,
-                       0x3ea83c02, 0x4a110304};
        int status;
 
        if (!(be_if_cap_flags(adapter) & BE_IF_FLAGS_RSS))
                return 0;
 
-       if (mutex_lock_interruptible(&adapter->mbox_lock))
-               return -1;
+       spin_lock_bh(&adapter->mcc_lock);
 
-       wrb = wrb_from_mbox(adapter);
+       wrb = wrb_from_mccq(adapter);
+       if (!wrb) {
+               status = -EBUSY;
+               goto err;
+       }
        req = embedded_payload(wrb);
 
        be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_ETH,
-               OPCODE_ETH_RSS_CONFIG, sizeof(*req), wrb, NULL);
+                              OPCODE_ETH_RSS_CONFIG, sizeof(*req), wrb, NULL);
 
        req->if_id = cpu_to_le32(adapter->if_handle);
        req->enable_rss = cpu_to_le16(rss_hash_opts);
        req->cpu_table_size_log2 = cpu_to_le16(fls(table_size) - 1);
 
-       if (lancer_chip(adapter) || skyhawk_chip(adapter))
+       if (!BEx_chip(adapter))
                req->hdr.version = 1;
 
        memcpy(req->cpu_table, rsstable, table_size);
-       memcpy(req->hash, myhash, sizeof(myhash));
+       memcpy(req->hash, rss_hkey, RSS_HASH_KEY_LEN);
        be_dws_cpu_to_le(req->hash, sizeof(req->hash));
 
-       status = be_mbox_notify_wait(adapter);
-
-       mutex_unlock(&adapter->mbox_lock);
+       status = be_mcc_notify_wait(adapter);
+err:
+       spin_unlock_bh(&adapter->mcc_lock);
        return status;
 }
 
 /* Uses sync mcc */
 int be_cmd_set_beacon_state(struct be_adapter *adapter, u8 port_num,
-                       u8 bcn, u8 sts, u8 state)
+                           u8 bcn, u8 sts, u8 state)
 {
        struct be_mcc_wrb *wrb;
        struct be_cmd_req_enable_disable_beacon *req;
@@ -2076,7 +2089,8 @@ int be_cmd_set_beacon_state(struct be_adapter *adapter, u8 port_num,
        req = embedded_payload(wrb);
 
        be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
-               OPCODE_COMMON_ENABLE_DISABLE_BEACON, sizeof(*req), wrb, NULL);
+                              OPCODE_COMMON_ENABLE_DISABLE_BEACON,
+                              sizeof(*req), wrb, NULL);
 
        req->port_num = port_num;
        req->beacon_state = state;
@@ -2107,7 +2121,8 @@ int be_cmd_get_beacon_state(struct be_adapter *adapter, u8 port_num, u32 *state)
        req = embedded_payload(wrb);
 
        be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
-               OPCODE_COMMON_GET_BEACON_STATE, sizeof(*req), wrb, NULL);
+                              OPCODE_COMMON_GET_BEACON_STATE, sizeof(*req),
+                              wrb, NULL);
 
        req->port_num = port_num;
 
@@ -2146,20 +2161,20 @@ int lancer_cmd_write_object(struct be_adapter *adapter, struct be_dma_mem *cmd,
        req = embedded_payload(wrb);
 
        be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
-                               OPCODE_COMMON_WRITE_OBJECT,
-                               sizeof(struct lancer_cmd_req_write_object), wrb,
-                               NULL);
+                              OPCODE_COMMON_WRITE_OBJECT,
+                              sizeof(struct lancer_cmd_req_write_object), wrb,
+                              NULL);
 
        ctxt = &req->context;
        AMAP_SET_BITS(struct amap_lancer_write_obj_context,
-                       write_length, ctxt, data_size);
+                     write_length, ctxt, data_size);
 
        if (data_size == 0)
                AMAP_SET_BITS(struct amap_lancer_write_obj_context,
-                               eof, ctxt, 1);
+                             eof, ctxt, 1);
        else
                AMAP_SET_BITS(struct amap_lancer_write_obj_context,
-                               eof, ctxt, 0);
+                             eof, ctxt, 0);
 
        be_dws_cpu_to_le(ctxt, sizeof(req->context));
        req->write_offset = cpu_to_le32(data_offset);
@@ -2167,8 +2182,8 @@ int lancer_cmd_write_object(struct be_adapter *adapter, struct be_dma_mem *cmd,
        req->descriptor_count = cpu_to_le32(1);
        req->buf_len = cpu_to_le32(data_size);
        req->addr_low = cpu_to_le32((cmd->dma +
-                               sizeof(struct lancer_cmd_req_write_object))
-                               & 0xFFFFFFFF);
+                                    sizeof(struct lancer_cmd_req_write_object))
+                                   & 0xFFFFFFFF);
        req->addr_high = cpu_to_le32(upper_32_bits(cmd->dma +
                                sizeof(struct lancer_cmd_req_write_object)));
 
@@ -2197,8 +2212,8 @@ err_unlock:
 }
 
 int lancer_cmd_read_object(struct be_adapter *adapter, struct be_dma_mem *cmd,
-               u32 data_size, u32 data_offset, const char *obj_name,
-               u32 *data_read, u32 *eof, u8 *addn_status)
+                          u32 data_size, u32 data_offset, const char *obj_name,
+                          u32 *data_read, u32 *eof, u8 *addn_status)
 {
        struct be_mcc_wrb *wrb;
        struct lancer_cmd_req_read_object *req;
@@ -2216,9 +2231,9 @@ int lancer_cmd_read_object(struct be_adapter *adapter, struct be_dma_mem *cmd,
        req = embedded_payload(wrb);
 
        be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
-                       OPCODE_COMMON_READ_OBJECT,
-                       sizeof(struct lancer_cmd_req_read_object), wrb,
-                       NULL);
+                              OPCODE_COMMON_READ_OBJECT,
+                              sizeof(struct lancer_cmd_req_read_object), wrb,
+                              NULL);
 
        req->desired_read_len = cpu_to_le32(data_size);
        req->read_offset = cpu_to_le32(data_offset);
@@ -2244,7 +2259,7 @@ err_unlock:
 }
 
 int be_cmd_write_flashrom(struct be_adapter *adapter, struct be_dma_mem *cmd,
-                       u32 flash_type, u32 flash_opcode, u32 buf_size)
+                         u32 flash_type, u32 flash_opcode, u32 buf_size)
 {
        struct be_mcc_wrb *wrb;
        struct be_cmd_write_flashrom *req;
@@ -2261,7 +2276,8 @@ int be_cmd_write_flashrom(struct be_adapter *adapter, struct be_dma_mem *cmd,
        req = cmd->va;
 
        be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
-               OPCODE_COMMON_WRITE_FLASHROM, cmd->size, wrb, cmd);
+                              OPCODE_COMMON_WRITE_FLASHROM, cmd->size, wrb,
+                              cmd);
 
        req->params.op_type = cpu_to_le32(flash_type);
        req->params.op_code = cpu_to_le32(flash_opcode);
@@ -2318,7 +2334,7 @@ err:
 }
 
 int be_cmd_enable_magic_wol(struct be_adapter *adapter, u8 *mac,
-                               struct be_dma_mem *nonemb_cmd)
+                           struct be_dma_mem *nonemb_cmd)
 {
        struct be_mcc_wrb *wrb;
        struct be_cmd_req_acpi_wol_magic_config *req;
@@ -2334,8 +2350,8 @@ int be_cmd_enable_magic_wol(struct be_adapter *adapter, u8 *mac,
        req = nonemb_cmd->va;
 
        be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_ETH,
-               OPCODE_ETH_ACPI_WOL_MAGIC_CONFIG, sizeof(*req), wrb,
-               nonemb_cmd);
+                              OPCODE_ETH_ACPI_WOL_MAGIC_CONFIG, sizeof(*req),
+                              wrb, nonemb_cmd);
        memcpy(req->magic_mac, mac, ETH_ALEN);
 
        status = be_mcc_notify_wait(adapter);
@@ -2363,8 +2379,8 @@ int be_cmd_set_loopback(struct be_adapter *adapter, u8 port_num,
        req = embedded_payload(wrb);
 
        be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_LOWLEVEL,
-                       OPCODE_LOWLEVEL_SET_LOOPBACK_MODE, sizeof(*req), wrb,
-                       NULL);
+                              OPCODE_LOWLEVEL_SET_LOOPBACK_MODE, sizeof(*req),
+                              wrb, NULL);
 
        req->src_port = port_num;
        req->dest_port = port_num;
@@ -2378,7 +2394,8 @@ err:
 }
 
 int be_cmd_loopback_test(struct be_adapter *adapter, u32 port_num,
-               u32 loopback_type, u32 pkt_size, u32 num_pkts, u64 pattern)
+                        u32 loopback_type, u32 pkt_size, u32 num_pkts,
+                        u64 pattern)
 {
        struct be_mcc_wrb *wrb;
        struct be_cmd_req_loopback_test *req;
@@ -2396,7 +2413,8 @@ int be_cmd_loopback_test(struct be_adapter *adapter, u32 port_num,
        req = embedded_payload(wrb);
 
        be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_LOWLEVEL,
-                       OPCODE_LOWLEVEL_LOOPBACK_TEST, sizeof(*req), wrb, NULL);
+                              OPCODE_LOWLEVEL_LOOPBACK_TEST, sizeof(*req), wrb,
+                              NULL);
 
        req->hdr.timeout = cpu_to_le32(15);
        req->pattern = cpu_to_le64(pattern);
@@ -2421,7 +2439,7 @@ err:
 }
 
 int be_cmd_ddr_dma_test(struct be_adapter *adapter, u64 pattern,
-                               u32 byte_cnt, struct be_dma_mem *cmd)
+                       u32 byte_cnt, struct be_dma_mem *cmd)
 {
        struct be_mcc_wrb *wrb;
        struct be_cmd_req_ddrdma_test *req;
@@ -2437,7 +2455,8 @@ int be_cmd_ddr_dma_test(struct be_adapter *adapter, u64 pattern,
        }
        req = cmd->va;
        be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_LOWLEVEL,
-                       OPCODE_LOWLEVEL_HOST_DDR_DMA, cmd->size, wrb, cmd);
+                              OPCODE_LOWLEVEL_HOST_DDR_DMA, cmd->size, wrb,
+                              cmd);
 
        req->pattern = cpu_to_le64(pattern);
        req->byte_count = cpu_to_le32(byte_cnt);
@@ -2465,7 +2484,7 @@ err:
 }
 
 int be_cmd_get_seeprom_data(struct be_adapter *adapter,
-                               struct be_dma_mem *nonemb_cmd)
+                           struct be_dma_mem *nonemb_cmd)
 {
        struct be_mcc_wrb *wrb;
        struct be_cmd_req_seeprom_read *req;
@@ -2481,8 +2500,8 @@ int be_cmd_get_seeprom_data(struct be_adapter *adapter,
        req = nonemb_cmd->va;
 
        be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
-                       OPCODE_COMMON_SEEPROM_READ, sizeof(*req), wrb,
-                       nonemb_cmd);
+                              OPCODE_COMMON_SEEPROM_READ, sizeof(*req), wrb,
+                              nonemb_cmd);
 
        status = be_mcc_notify_wait(adapter);
 
@@ -2510,8 +2529,7 @@ int be_cmd_get_phy_info(struct be_adapter *adapter)
                goto err;
        }
        cmd.size = sizeof(struct be_cmd_req_get_phy_info);
-       cmd.va = pci_alloc_consistent(adapter->pdev, cmd.size,
-                                       &cmd.dma);
+       cmd.va = pci_alloc_consistent(adapter->pdev, cmd.size, &cmd.dma);
        if (!cmd.va) {
                dev_err(&adapter->pdev->dev, "Memory alloc failure\n");
                status = -ENOMEM;
@@ -2521,8 +2539,8 @@ int be_cmd_get_phy_info(struct be_adapter *adapter)
        req = cmd.va;
 
        be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
-                       OPCODE_COMMON_GET_PHY_DETAILS, sizeof(*req),
-                       wrb, &cmd);
+                              OPCODE_COMMON_GET_PHY_DETAILS, sizeof(*req),
+                              wrb, &cmd);
 
        status = be_mcc_notify_wait(adapter);
        if (!status) {
@@ -2544,8 +2562,7 @@ int be_cmd_get_phy_info(struct be_adapter *adapter)
                                BE_SUPPORTED_SPEED_1GBPS;
                }
        }
-       pci_free_consistent(adapter->pdev, cmd.size,
-                               cmd.va, cmd.dma);
+       pci_free_consistent(adapter->pdev, cmd.size, cmd.va, cmd.dma);
 err:
        spin_unlock_bh(&adapter->mcc_lock);
        return status;
@@ -2568,7 +2585,7 @@ int be_cmd_set_qos(struct be_adapter *adapter, u32 bps, u32 domain)
        req = embedded_payload(wrb);
 
        be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
-                       OPCODE_COMMON_SET_QOS, sizeof(*req), wrb, NULL);
+                              OPCODE_COMMON_SET_QOS, sizeof(*req), wrb, NULL);
 
        req->hdr.domain = domain;
        req->valid_bits = cpu_to_le32(BE_QOS_BITS_NIC);
@@ -2597,10 +2614,9 @@ int be_cmd_get_cntl_attributes(struct be_adapter *adapter)
        memset(&attribs_cmd, 0, sizeof(struct be_dma_mem));
        attribs_cmd.size = sizeof(struct be_cmd_resp_cntl_attribs);
        attribs_cmd.va = pci_alloc_consistent(adapter->pdev, attribs_cmd.size,
-                                               &attribs_cmd.dma);
+                                             &attribs_cmd.dma);
        if (!attribs_cmd.va) {
-               dev_err(&adapter->pdev->dev,
-                               "Memory allocation failure\n");
+               dev_err(&adapter->pdev->dev, "Memory allocation failure\n");
                status = -ENOMEM;
                goto err;
        }
@@ -2613,8 +2629,8 @@ int be_cmd_get_cntl_attributes(struct be_adapter *adapter)
        req = attribs_cmd.va;
 
        be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
-                        OPCODE_COMMON_GET_CNTL_ATTRIBUTES, payload_len, wrb,
-                       &attribs_cmd);
+                              OPCODE_COMMON_GET_CNTL_ATTRIBUTES, payload_len,
+                              wrb, &attribs_cmd);
 
        status = be_mbox_notify_wait(adapter);
        if (!status) {
@@ -2649,7 +2665,8 @@ int be_cmd_req_native_mode(struct be_adapter *adapter)
        req = embedded_payload(wrb);
 
        be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
-               OPCODE_COMMON_SET_DRIVER_FUNCTION_CAP, sizeof(*req), wrb, NULL);
+                              OPCODE_COMMON_SET_DRIVER_FUNCTION_CAP,
+                              sizeof(*req), wrb, NULL);
 
        req->valid_cap_flags = cpu_to_le32(CAPABILITY_SW_TIMESTAMPS |
                                CAPABILITY_BE3_NATIVE_ERX_API);
@@ -2762,12 +2779,12 @@ int be_cmd_get_mac_from_list(struct be_adapter *adapter, u8 *mac,
        memset(&get_mac_list_cmd, 0, sizeof(struct be_dma_mem));
        get_mac_list_cmd.size = sizeof(struct be_cmd_resp_get_mac_list);
        get_mac_list_cmd.va = pci_alloc_consistent(adapter->pdev,
-                       get_mac_list_cmd.size,
-                       &get_mac_list_cmd.dma);
+                                                  get_mac_list_cmd.size,
+                                                  &get_mac_list_cmd.dma);
 
        if (!get_mac_list_cmd.va) {
                dev_err(&adapter->pdev->dev,
-                               "Memory allocation failure during GET_MAC_LIST\n");
+                       "Memory allocation failure during GET_MAC_LIST\n");
                return -ENOMEM;
        }
 
@@ -2831,18 +2848,18 @@ int be_cmd_get_mac_from_list(struct be_adapter *adapter, u8 *mac,
                /* If no active mac_id found, return first mac addr */
                *pmac_id_valid = false;
                memcpy(mac, resp->macaddr_list[0].mac_addr_id.macaddr,
-                                                               ETH_ALEN);
+                      ETH_ALEN);
        }
 
 out:
        spin_unlock_bh(&adapter->mcc_lock);
        pci_free_consistent(adapter->pdev, get_mac_list_cmd.size,
-                       get_mac_list_cmd.va, get_mac_list_cmd.dma);
+                           get_mac_list_cmd.va, get_mac_list_cmd.dma);
        return status;
 }
 
-int be_cmd_get_active_mac(struct be_adapter *adapter, u32 curr_pmac_id, u8 *mac,
-                         u32 if_handle, bool active, u32 domain)
+int be_cmd_get_active_mac(struct be_adapter *adapter, u32 curr_pmac_id,
+                         u8 *mac, u32 if_handle, bool active, u32 domain)
 {
 
        if (!active)
@@ -2892,7 +2909,7 @@ int be_cmd_set_mac_list(struct be_adapter *adapter, u8 *mac_array,
        memset(&cmd, 0, sizeof(struct be_dma_mem));
        cmd.size = sizeof(struct be_cmd_req_set_mac_list);
        cmd.va = dma_alloc_coherent(&adapter->pdev->dev, cmd.size,
-                       &cmd.dma, GFP_KERNEL);
+                                   &cmd.dma, GFP_KERNEL);
        if (!cmd.va)
                return -ENOMEM;
 
@@ -2906,8 +2923,8 @@ int be_cmd_set_mac_list(struct be_adapter *adapter, u8 *mac_array,
 
        req = cmd.va;
        be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
-                               OPCODE_COMMON_SET_MAC_LIST, sizeof(*req),
-                               wrb, &cmd);
+                              OPCODE_COMMON_SET_MAC_LIST, sizeof(*req),
+                              wrb, &cmd);
 
        req->hdr.domain = domain;
        req->mac_count = mac_count;
@@ -2917,8 +2934,7 @@ int be_cmd_set_mac_list(struct be_adapter *adapter, u8 *mac_array,
        status = be_mcc_notify_wait(adapter);
 
 err:
-       dma_free_coherent(&adapter->pdev->dev, cmd.size,
-                               cmd.va, cmd.dma);
+       dma_free_coherent(&adapter->pdev->dev, cmd.size, cmd.va, cmd.dma);
        spin_unlock_bh(&adapter->mcc_lock);
        return status;
 }
@@ -2963,7 +2979,8 @@ int be_cmd_set_hsw_config(struct be_adapter *adapter, u16 pvid,
        ctxt = &req->context;
 
        be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
-                       OPCODE_COMMON_SET_HSW_CONFIG, sizeof(*req), wrb, NULL);
+                              OPCODE_COMMON_SET_HSW_CONFIG, sizeof(*req), wrb,
+                              NULL);
 
        req->hdr.domain = domain;
        AMAP_SET_BITS(struct amap_set_hsw_context, interface_id, ctxt, intf_id);
@@ -3009,7 +3026,8 @@ int be_cmd_get_hsw_config(struct be_adapter *adapter, u16 *pvid,
        ctxt = &req->context;
 
        be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
-                       OPCODE_COMMON_GET_HSW_CONFIG, sizeof(*req), wrb, NULL);
+                              OPCODE_COMMON_GET_HSW_CONFIG, sizeof(*req), wrb,
+                              NULL);
 
        req->hdr.domain = domain;
        AMAP_SET_BITS(struct amap_get_hsw_req_context, interface_id,
@@ -3027,10 +3045,9 @@ int be_cmd_get_hsw_config(struct be_adapter *adapter, u16 *pvid,
        if (!status) {
                struct be_cmd_resp_get_hsw_config *resp =
                                                embedded_payload(wrb);
-               be_dws_le_to_cpu(&resp->context,
-                                               sizeof(resp->context));
+               be_dws_le_to_cpu(&resp->context, sizeof(resp->context));
                vid = AMAP_GET_BITS(struct amap_get_hsw_resp_context,
-                                                       pvid, &resp->context);
+                                   pvid, &resp->context);
                if (pvid)
                        *pvid = le16_to_cpu(vid);
                if (mode)
@@ -3062,11 +3079,9 @@ int be_cmd_get_acpi_wol_cap(struct be_adapter *adapter)
 
        memset(&cmd, 0, sizeof(struct be_dma_mem));
        cmd.size = sizeof(struct be_cmd_resp_acpi_wol_magic_config_v1);
-       cmd.va = pci_alloc_consistent(adapter->pdev, cmd.size,
-                                              &cmd.dma);
+       cmd.va = pci_alloc_consistent(adapter->pdev, cmd.size, &cmd.dma);
        if (!cmd.va) {
-               dev_err(&adapter->pdev->dev,
-                               "Memory allocation failure\n");
+               dev_err(&adapter->pdev->dev, "Memory allocation failure\n");
                status = -ENOMEM;
                goto err;
        }
@@ -3349,8 +3364,7 @@ int be_cmd_get_func_config(struct be_adapter *adapter, struct be_resources *res)
 
        memset(&cmd, 0, sizeof(struct be_dma_mem));
        cmd.size = sizeof(struct be_cmd_resp_get_func_config);
-       cmd.va = pci_alloc_consistent(adapter->pdev, cmd.size,
-                                     &cmd.dma);
+       cmd.va = pci_alloc_consistent(adapter->pdev, cmd.size, &cmd.dma);
        if (!cmd.va) {
                dev_err(&adapter->pdev->dev, "Memory alloc failure\n");
                status = -ENOMEM;
@@ -3396,7 +3410,7 @@ err:
 
 /* Uses mbox */
 static int be_cmd_get_profile_config_mbox(struct be_adapter *adapter,
-                                       u8 domain, struct be_dma_mem *cmd)
+                                         u8 domain, struct be_dma_mem *cmd)
 {
        struct be_mcc_wrb *wrb;
        struct be_cmd_req_get_profile_config *req;
@@ -3424,7 +3438,7 @@ static int be_cmd_get_profile_config_mbox(struct be_adapter *adapter,
 
 /* Uses sync mcc */
 static int be_cmd_get_profile_config_mccq(struct be_adapter *adapter,
-                                       u8 domain, struct be_dma_mem *cmd)
+                                         u8 domain, struct be_dma_mem *cmd)
 {
        struct be_mcc_wrb *wrb;
        struct be_cmd_req_get_profile_config *req;
@@ -3484,8 +3498,8 @@ int be_cmd_get_profile_config(struct be_adapter *adapter,
        resp = cmd.va;
        desc_count = le32_to_cpu(resp->desc_count);
 
-       pcie =  be_get_pcie_desc(adapter->pdev->devfn, resp->func_param,
-                                desc_count);
+       pcie = be_get_pcie_desc(adapter->pdev->devfn, resp->func_param,
+                               desc_count);
        if (pcie)
                res->max_vfs = le16_to_cpu(pcie->num_vfs);
 
@@ -3859,7 +3873,7 @@ err:
 }
 
 int be_roce_mcc_cmd(void *netdev_handle, void *wrb_payload,
-                       int wrb_payload_size, u16 *cmd_status, u16 *ext_status)
+                   int wrb_payload_size, u16 *cmd_status, u16 *ext_status)
 {
        struct be_adapter *adapter = netdev_priv(netdev_handle);
        struct be_mcc_wrb *wrb;
index b60e4d53c1c9a9f29be9d1c2c07099dab28b378a..228d4b611084154f3e3699b7ac23ccc7e5bd6487 100644 (file)
@@ -2060,7 +2060,7 @@ int be_cmd_get_fw_ver(struct be_adapter *adapter, char *fw_ver,
                      char *fw_on_flash);
 int be_cmd_modify_eqd(struct be_adapter *adapter, struct be_set_eqd *, int num);
 int be_cmd_vlan_config(struct be_adapter *adapter, u32 if_id, u16 *vtag_array,
-                      u32 num, bool promiscuous);
+                      u32 num);
 int be_cmd_rx_filter(struct be_adapter *adapter, u32 flags, u32 status);
 int be_cmd_set_flow_control(struct be_adapter *adapter, u32 tx_fc, u32 rx_fc);
 int be_cmd_get_flow_control(struct be_adapter *adapter, u32 *tx_fc, u32 *rx_fc);
@@ -2068,7 +2068,7 @@ int be_cmd_query_fw_cfg(struct be_adapter *adapter, u32 *port_num,
                        u32 *function_mode, u32 *function_caps, u16 *asic_rev);
 int be_cmd_reset_function(struct be_adapter *adapter);
 int be_cmd_rss_config(struct be_adapter *adapter, u8 *rsstable,
-                     u32 rss_hash_opts, u16 table_size);
+                     u32 rss_hash_opts, u16 table_size, u8 *rss_hkey);
 int be_process_mcc(struct be_adapter *adapter);
 int be_cmd_set_beacon_state(struct be_adapter *adapter, u8 port_num, u8 beacon,
                            u8 status, u8 state);
index 15ba96cba65df1ba051cc3b2e49b72640d92c217..970ae337daace7966c96baf80a92072c996de91e 100644 (file)
@@ -132,6 +132,7 @@ static const struct be_ethtool_stat et_rx_stats[] = {
        {DRVSTAT_RX_INFO(rx_bytes)},/* If moving this member see above note */
        {DRVSTAT_RX_INFO(rx_pkts)}, /* If moving this member see above note */
        {DRVSTAT_RX_INFO(rx_compl)},
+       {DRVSTAT_RX_INFO(rx_compl_err)},
        {DRVSTAT_RX_INFO(rx_mcast_pkts)},
        /* Number of page allocation failures while posting receive buffers
         * to HW.
@@ -181,7 +182,7 @@ static const char et_self_tests[][ETH_GSTRING_LEN] = {
 #define BE_NO_LOOPBACK 0xff
 
 static void be_get_drvinfo(struct net_device *netdev,
-                               struct ethtool_drvinfo *drvinfo)
+                          struct ethtool_drvinfo *drvinfo)
 {
        struct be_adapter *adapter = netdev_priv(netdev);
 
@@ -201,8 +202,7 @@ static void be_get_drvinfo(struct net_device *netdev,
        drvinfo->eedump_len = 0;
 }
 
-static u32
-lancer_cmd_get_file_len(struct be_adapter *adapter, u8 *file_name)
+static u32 lancer_cmd_get_file_len(struct be_adapter *adapter, u8 *file_name)
 {
        u32 data_read = 0, eof;
        u8 addn_status;
@@ -212,14 +212,14 @@ lancer_cmd_get_file_len(struct be_adapter *adapter, u8 *file_name)
        memset(&data_len_cmd, 0, sizeof(data_len_cmd));
        /* data_offset and data_size should be 0 to get reg len */
        status = lancer_cmd_read_object(adapter, &data_len_cmd, 0, 0,
-                               file_name, &data_read, &eof, &addn_status);
+                                       file_name, &data_read, &eof,
+                                       &addn_status);
 
        return data_read;
 }
 
-static int
-lancer_cmd_read_file(struct be_adapter *adapter, u8 *file_name,
-               u32 buf_len, void *buf)
+static int lancer_cmd_read_file(struct be_adapter *adapter, u8 *file_name,
+                               u32 buf_len, void *buf)
 {
        struct be_dma_mem read_cmd;
        u32 read_len = 0, total_read_len = 0, chunk_size;
@@ -229,11 +229,11 @@ lancer_cmd_read_file(struct be_adapter *adapter, u8 *file_name,
 
        read_cmd.size = LANCER_READ_FILE_CHUNK;
        read_cmd.va = pci_alloc_consistent(adapter->pdev, read_cmd.size,
-                       &read_cmd.dma);
+                                          &read_cmd.dma);
 
        if (!read_cmd.va) {
                dev_err(&adapter->pdev->dev,
-                               "Memory allocation failure while reading dump\n");
+                       "Memory allocation failure while reading dump\n");
                return -ENOMEM;
        }
 
@@ -242,8 +242,8 @@ lancer_cmd_read_file(struct be_adapter *adapter, u8 *file_name,
                                LANCER_READ_FILE_CHUNK);
                chunk_size = ALIGN(chunk_size, 4);
                status = lancer_cmd_read_object(adapter, &read_cmd, chunk_size,
-                               total_read_len, file_name, &read_len,
-                               &eof, &addn_status);
+                                               total_read_len, file_name,
+                                               &read_len, &eof, &addn_status);
                if (!status) {
                        memcpy(buf + total_read_len, read_cmd.va, read_len);
                        total_read_len += read_len;
@@ -254,13 +254,12 @@ lancer_cmd_read_file(struct be_adapter *adapter, u8 *file_name,
                }
        }
        pci_free_consistent(adapter->pdev, read_cmd.size, read_cmd.va,
-                       read_cmd.dma);
+                           read_cmd.dma);
 
        return status;
 }
 
-static int
-be_get_reg_len(struct net_device *netdev)
+static int be_get_reg_len(struct net_device *netdev)
 {
        struct be_adapter *adapter = netdev_priv(netdev);
        u32 log_size = 0;
@@ -271,7 +270,7 @@ be_get_reg_len(struct net_device *netdev)
        if (be_physfn(adapter)) {
                if (lancer_chip(adapter))
                        log_size = lancer_cmd_get_file_len(adapter,
-                                       LANCER_FW_DUMP_FILE);
+                                                          LANCER_FW_DUMP_FILE);
                else
                        be_cmd_get_reg_len(adapter, &log_size);
        }
@@ -287,7 +286,7 @@ be_get_regs(struct net_device *netdev, struct ethtool_regs *regs, void *buf)
                memset(buf, 0, regs->len);
                if (lancer_chip(adapter))
                        lancer_cmd_read_file(adapter, LANCER_FW_DUMP_FILE,
-                                       regs->len, buf);
+                                            regs->len, buf);
                else
                        be_cmd_get_regs(adapter, regs->len, buf);
        }
@@ -337,9 +336,8 @@ static int be_set_coalesce(struct net_device *netdev,
        return 0;
 }
 
-static void
-be_get_ethtool_stats(struct net_device *netdev,
-               struct ethtool_stats *stats, uint64_t *data)
+static void be_get_ethtool_stats(struct net_device *netdev,
+                                struct ethtool_stats *stats, uint64_t *data)
 {
        struct be_adapter *adapter = netdev_priv(netdev);
        struct be_rx_obj *rxo;
@@ -390,9 +388,8 @@ be_get_ethtool_stats(struct net_device *netdev,
        }
 }
 
-static void
-be_get_stat_strings(struct net_device *netdev, uint32_t stringset,
-               uint8_t *data)
+static void be_get_stat_strings(struct net_device *netdev, uint32_t stringset,
+                               uint8_t *data)
 {
        struct be_adapter *adapter = netdev_priv(netdev);
        int i, j;
@@ -642,16 +639,15 @@ be_set_pauseparam(struct net_device *netdev, struct ethtool_pauseparam *ecmd)
        adapter->rx_fc = ecmd->rx_pause;
 
        status = be_cmd_set_flow_control(adapter,
-                                       adapter->tx_fc, adapter->rx_fc);
+                                        adapter->tx_fc, adapter->rx_fc);
        if (status)
                dev_warn(&adapter->pdev->dev, "Pause param set failed.\n");
 
        return status;
 }
 
-static int
-be_set_phys_id(struct net_device *netdev,
-              enum ethtool_phys_id_state state)
+static int be_set_phys_id(struct net_device *netdev,
+                         enum ethtool_phys_id_state state)
 {
        struct be_adapter *adapter = netdev_priv(netdev);
 
@@ -708,8 +704,7 @@ static int be_set_dump(struct net_device *netdev, struct ethtool_dump *dump)
        return status;
 }
 
-static void
-be_get_wol(struct net_device *netdev, struct ethtool_wolinfo *wol)
+static void be_get_wol(struct net_device *netdev, struct ethtool_wolinfo *wol)
 {
        struct be_adapter *adapter = netdev_priv(netdev);
 
@@ -723,8 +718,7 @@ be_get_wol(struct net_device *netdev, struct ethtool_wolinfo *wol)
        memset(&wol->sopass, 0, sizeof(wol->sopass));
 }
 
-static int
-be_set_wol(struct net_device *netdev, struct ethtool_wolinfo *wol)
+static int be_set_wol(struct net_device *netdev, struct ethtool_wolinfo *wol)
 {
        struct be_adapter *adapter = netdev_priv(netdev);
 
@@ -744,8 +738,7 @@ be_set_wol(struct net_device *netdev, struct ethtool_wolinfo *wol)
        return 0;
 }
 
-static int
-be_test_ddr_dma(struct be_adapter *adapter)
+static int be_test_ddr_dma(struct be_adapter *adapter)
 {
        int ret, i;
        struct be_dma_mem ddrdma_cmd;
@@ -761,7 +754,7 @@ be_test_ddr_dma(struct be_adapter *adapter)
 
        for (i = 0; i < 2; i++) {
                ret = be_cmd_ddr_dma_test(adapter, pattern[i],
-                                       4096, &ddrdma_cmd);
+                                         4096, &ddrdma_cmd);
                if (ret != 0)
                        goto err;
        }
@@ -773,20 +766,17 @@ err:
 }
 
 static u64 be_loopback_test(struct be_adapter *adapter, u8 loopback_type,
-                               u64 *status)
+                           u64 *status)
 {
-       be_cmd_set_loopback(adapter, adapter->hba_port_num,
-                               loopback_type, 1);
+       be_cmd_set_loopback(adapter, adapter->hba_port_num, loopback_type, 1);
        *status = be_cmd_loopback_test(adapter, adapter->hba_port_num,
-                               loopback_type, 1500,
-                               2, 0xabc);
-       be_cmd_set_loopback(adapter, adapter->hba_port_num,
-                               BE_NO_LOOPBACK, 1);
+                                      loopback_type, 1500, 2, 0xabc);
+       be_cmd_set_loopback(adapter, adapter->hba_port_num, BE_NO_LOOPBACK, 1);
        return *status;
 }
 
-static void
-be_self_test(struct net_device *netdev, struct ethtool_test *test, u64 *data)
+static void be_self_test(struct net_device *netdev, struct ethtool_test *test,
+                        u64 *data)
 {
        struct be_adapter *adapter = netdev_priv(netdev);
        int status;
@@ -801,12 +791,10 @@ be_self_test(struct net_device *netdev, struct ethtool_test *test, u64 *data)
        memset(data, 0, sizeof(u64) * ETHTOOL_TESTS_NUM);
 
        if (test->flags & ETH_TEST_FL_OFFLINE) {
-               if (be_loopback_test(adapter, BE_MAC_LOOPBACK,
-                                    &data[0]) != 0)
+               if (be_loopback_test(adapter, BE_MAC_LOOPBACK, &data[0]) != 0)
                        test->flags |= ETH_TEST_FL_FAILED;
 
-               if (be_loopback_test(adapter, BE_PHY_LOOPBACK,
-                                    &data[1]) != 0)
+               if (be_loopback_test(adapter, BE_PHY_LOOPBACK, &data[1]) != 0)
                        test->flags |= ETH_TEST_FL_FAILED;
 
                if (test->flags & ETH_TEST_FL_EXTERNAL_LB) {
@@ -832,16 +820,14 @@ be_self_test(struct net_device *netdev, struct ethtool_test *test, u64 *data)
        }
 }
 
-static int
-be_do_flash(struct net_device *netdev, struct ethtool_flash *efl)
+static int be_do_flash(struct net_device *netdev, struct ethtool_flash *efl)
 {
        struct be_adapter *adapter = netdev_priv(netdev);
 
        return be_load_fw(adapter, efl->data);
 }
 
-static int
-be_get_eeprom_len(struct net_device *netdev)
+static int be_get_eeprom_len(struct net_device *netdev)
 {
        struct be_adapter *adapter = netdev_priv(netdev);
 
@@ -851,18 +837,17 @@ be_get_eeprom_len(struct net_device *netdev)
        if (lancer_chip(adapter)) {
                if (be_physfn(adapter))
                        return lancer_cmd_get_file_len(adapter,
-                                       LANCER_VPD_PF_FILE);
+                                                      LANCER_VPD_PF_FILE);
                else
                        return lancer_cmd_get_file_len(adapter,
-                                       LANCER_VPD_VF_FILE);
+                                                      LANCER_VPD_VF_FILE);
        } else {
                return BE_READ_SEEPROM_LEN;
        }
 }
 
-static int
-be_read_eeprom(struct net_device *netdev, struct ethtool_eeprom *eeprom,
-                       uint8_t *data)
+static int be_read_eeprom(struct net_device *netdev,
+                         struct ethtool_eeprom *eeprom, uint8_t *data)
 {
        struct be_adapter *adapter = netdev_priv(netdev);
        struct be_dma_mem eeprom_cmd;
@@ -875,10 +860,10 @@ be_read_eeprom(struct net_device *netdev, struct ethtool_eeprom *eeprom,
        if (lancer_chip(adapter)) {
                if (be_physfn(adapter))
                        return lancer_cmd_read_file(adapter, LANCER_VPD_PF_FILE,
-                                       eeprom->len, data);
+                                                   eeprom->len, data);
                else
                        return lancer_cmd_read_file(adapter, LANCER_VPD_VF_FILE,
-                                       eeprom->len, data);
+                                                   eeprom->len, data);
        }
 
        eeprom->magic = BE_VENDOR_ID | (adapter->pdev->device<<16);
@@ -933,27 +918,27 @@ static u64 be_get_rss_hash_opts(struct be_adapter *adapter, u64 flow_type)
 
        switch (flow_type) {
        case TCP_V4_FLOW:
-               if (adapter->rss_flags & RSS_ENABLE_IPV4)
+               if (adapter->rss_info.rss_flags & RSS_ENABLE_IPV4)
                        data |= RXH_IP_DST | RXH_IP_SRC;
-               if (adapter->rss_flags & RSS_ENABLE_TCP_IPV4)
+               if (adapter->rss_info.rss_flags & RSS_ENABLE_TCP_IPV4)
                        data |= RXH_L4_B_0_1 | RXH_L4_B_2_3;
                break;
        case UDP_V4_FLOW:
-               if (adapter->rss_flags & RSS_ENABLE_IPV4)
+               if (adapter->rss_info.rss_flags & RSS_ENABLE_IPV4)
                        data |= RXH_IP_DST | RXH_IP_SRC;
-               if (adapter->rss_flags & RSS_ENABLE_UDP_IPV4)
+               if (adapter->rss_info.rss_flags & RSS_ENABLE_UDP_IPV4)
                        data |= RXH_L4_B_0_1 | RXH_L4_B_2_3;
                break;
        case TCP_V6_FLOW:
-               if (adapter->rss_flags & RSS_ENABLE_IPV6)
+               if (adapter->rss_info.rss_flags & RSS_ENABLE_IPV6)
                        data |= RXH_IP_DST | RXH_IP_SRC;
-               if (adapter->rss_flags & RSS_ENABLE_TCP_IPV6)
+               if (adapter->rss_info.rss_flags & RSS_ENABLE_TCP_IPV6)
                        data |= RXH_L4_B_0_1 | RXH_L4_B_2_3;
                break;
        case UDP_V6_FLOW:
-               if (adapter->rss_flags & RSS_ENABLE_IPV6)
+               if (adapter->rss_info.rss_flags & RSS_ENABLE_IPV6)
                        data |= RXH_IP_DST | RXH_IP_SRC;
-               if (adapter->rss_flags & RSS_ENABLE_UDP_IPV6)
+               if (adapter->rss_info.rss_flags & RSS_ENABLE_UDP_IPV6)
                        data |= RXH_L4_B_0_1 | RXH_L4_B_2_3;
                break;
        }
@@ -962,7 +947,7 @@ static u64 be_get_rss_hash_opts(struct be_adapter *adapter, u64 flow_type)
 }
 
 static int be_get_rxnfc(struct net_device *netdev, struct ethtool_rxnfc *cmd,
-                     u32 *rule_locs)
+                       u32 *rule_locs)
 {
        struct be_adapter *adapter = netdev_priv(netdev);
 
@@ -992,7 +977,7 @@ static int be_set_rss_hash_opts(struct be_adapter *adapter,
        struct be_rx_obj *rxo;
        int status = 0, i, j;
        u8 rsstable[128];
-       u32 rss_flags = adapter->rss_flags;
+       u32 rss_flags = adapter->rss_info.rss_flags;
 
        if (cmd->data != L3_RSS_FLAGS &&
            cmd->data != (L3_RSS_FLAGS | L4_RSS_FLAGS))
@@ -1039,7 +1024,7 @@ static int be_set_rss_hash_opts(struct be_adapter *adapter,
                return -EINVAL;
        }
 
-       if (rss_flags == adapter->rss_flags)
+       if (rss_flags == adapter->rss_info.rss_flags)
                return status;
 
        if (be_multi_rxq(adapter)) {
@@ -1051,9 +1036,11 @@ static int be_set_rss_hash_opts(struct be_adapter *adapter,
                        }
                }
        }
-       status = be_cmd_rss_config(adapter, rsstable, rss_flags, 128);
+
+       status = be_cmd_rss_config(adapter, adapter->rss_info.rsstable,
+                                  rss_flags, 128, adapter->rss_info.rss_hkey);
        if (!status)
-               adapter->rss_flags = rss_flags;
+               adapter->rss_info.rss_flags = rss_flags;
 
        return status;
 }
@@ -1103,6 +1090,68 @@ static int be_set_channels(struct net_device  *netdev,
        return be_update_queues(adapter);
 }
 
+static u32 be_get_rxfh_indir_size(struct net_device *netdev)
+{
+       return RSS_INDIR_TABLE_LEN;
+}
+
+static u32 be_get_rxfh_key_size(struct net_device *netdev)
+{
+       return RSS_HASH_KEY_LEN;
+}
+
+static int be_get_rxfh(struct net_device *netdev, u32 *indir, u8 *hkey)
+{
+       struct be_adapter *adapter = netdev_priv(netdev);
+       int i;
+       struct rss_info *rss = &adapter->rss_info;
+
+       if (indir) {
+               for (i = 0; i < RSS_INDIR_TABLE_LEN; i++)
+                       indir[i] = rss->rss_queue[i];
+       }
+
+       if (hkey)
+               memcpy(hkey, rss->rss_hkey, RSS_HASH_KEY_LEN);
+
+       return 0;
+}
+
+static int be_set_rxfh(struct net_device *netdev, u32 *indir, u8 *hkey)
+{
+       int rc = 0, i, j;
+       struct be_adapter *adapter = netdev_priv(netdev);
+       u8 rsstable[RSS_INDIR_TABLE_LEN];
+
+       if (indir) {
+               struct be_rx_obj *rxo;
+               for (i = 0; i < RSS_INDIR_TABLE_LEN; i++) {
+                       j = indir[i];
+                       rxo = &adapter->rx_obj[j];
+                       rsstable[i] = rxo->rss_id;
+                       adapter->rss_info.rss_queue[i] = j;
+               }
+       } else {
+               memcpy(rsstable, adapter->rss_info.rsstable,
+                      RSS_INDIR_TABLE_LEN);
+       }
+
+       if (!hkey)
+               hkey =  adapter->rss_info.rss_hkey;
+
+       rc = be_cmd_rss_config(adapter, rsstable,
+                       adapter->rss_info.rss_flags,
+                       RSS_INDIR_TABLE_LEN, hkey);
+       if (rc) {
+               adapter->rss_info.rss_flags = RSS_ENABLE_NONE;
+               return -EIO;
+       }
+       memcpy(adapter->rss_info.rss_hkey, hkey, RSS_HASH_KEY_LEN);
+       memcpy(adapter->rss_info.rsstable, rsstable,
+              RSS_INDIR_TABLE_LEN);
+       return 0;
+}
+
 const struct ethtool_ops be_ethtool_ops = {
        .get_settings = be_get_settings,
        .get_drvinfo = be_get_drvinfo,
@@ -1129,6 +1178,10 @@ const struct ethtool_ops be_ethtool_ops = {
        .self_test = be_self_test,
        .get_rxnfc = be_get_rxnfc,
        .set_rxnfc = be_set_rxnfc,
+       .get_rxfh_indir_size = be_get_rxfh_indir_size,
+       .get_rxfh_key_size = be_get_rxfh_key_size,
+       .get_rxfh = be_get_rxfh,
+       .set_rxfh = be_set_rxfh,
        .get_channels = be_get_channels,
        .set_channels = be_set_channels
 };
index a18645407d2152b43353a50b76ccf6317ef90151..dcc5e5c69743d8e5fdffb6e4b3f0e53e394bf518 100644 (file)
@@ -134,7 +134,7 @@ static void be_queue_free(struct be_adapter *adapter, struct be_queue_info *q)
 }
 
 static int be_queue_alloc(struct be_adapter *adapter, struct be_queue_info *q,
-               u16 len, u16 entry_size)
+                         u16 len, u16 entry_size)
 {
        struct be_dma_mem *mem = &q->dma_mem;
 
@@ -154,7 +154,7 @@ static void be_reg_intr_set(struct be_adapter *adapter, bool enable)
        u32 reg, enabled;
 
        pci_read_config_dword(adapter->pdev, PCICFG_MEMBAR_CTRL_INT_CTRL_OFFSET,
-                               &reg);
+                             &reg);
        enabled = reg & MEMBAR_CTRL_INT_CTRL_HOSTINTR_MASK;
 
        if (!enabled && enable)
@@ -165,7 +165,7 @@ static void be_reg_intr_set(struct be_adapter *adapter, bool enable)
                return;
 
        pci_write_config_dword(adapter->pdev,
-                       PCICFG_MEMBAR_CTRL_INT_CTRL_OFFSET, reg);
+                              PCICFG_MEMBAR_CTRL_INT_CTRL_OFFSET, reg);
 }
 
 static void be_intr_set(struct be_adapter *adapter, bool enable)
@@ -206,12 +206,11 @@ static void be_txq_notify(struct be_adapter *adapter, struct be_tx_obj *txo,
 }
 
 static void be_eq_notify(struct be_adapter *adapter, u16 qid,
-               bool arm, bool clear_int, u16 num_popped)
+                        bool arm, bool clear_int, u16 num_popped)
 {
        u32 val = 0;
        val |= qid & DB_EQ_RING_ID_MASK;
-       val |= ((qid & DB_EQ_RING_ID_EXT_MASK) <<
-                       DB_EQ_RING_ID_EXT_MASK_SHIFT);
+       val |= ((qid & DB_EQ_RING_ID_EXT_MASK) << DB_EQ_RING_ID_EXT_MASK_SHIFT);
 
        if (adapter->eeh_error)
                return;
@@ -477,7 +476,7 @@ static void populate_be_v2_stats(struct be_adapter *adapter)
        drvs->rx_drops_no_tpre_descr = rxf_stats->rx_drops_no_tpre_descr;
        drvs->rx_drops_too_many_frags = rxf_stats->rx_drops_too_many_frags;
        adapter->drv_stats.eth_red_drops = pmem_sts->eth_red_drops;
-       if (be_roce_supported(adapter))  {
+       if (be_roce_supported(adapter)) {
                drvs->rx_roce_bytes_lsd = port_stats->roce_bytes_received_lsd;
                drvs->rx_roce_bytes_msd = port_stats->roce_bytes_received_msd;
                drvs->rx_roce_frames = port_stats->roce_frames_received;
@@ -491,8 +490,7 @@ static void populate_lancer_stats(struct be_adapter *adapter)
 {
 
        struct be_drv_stats *drvs = &adapter->drv_stats;
-       struct lancer_pport_stats *pport_stats =
-                                       pport_stats_from_cmd(adapter);
+       struct lancer_pport_stats *pport_stats = pport_stats_from_cmd(adapter);
 
        be_dws_le_to_cpu(pport_stats, sizeof(*pport_stats));
        drvs->rx_pause_frames = pport_stats->rx_pause_frames_lo;
@@ -539,8 +537,7 @@ static void accumulate_16bit_val(u32 *acc, u16 val)
 }
 
 static void populate_erx_stats(struct be_adapter *adapter,
-                       struct be_rx_obj *rxo,
-                       u32 erx_stat)
+                              struct be_rx_obj *rxo, u32 erx_stat)
 {
        if (!BEx_chip(adapter))
                rx_stats(rxo)->rx_drops_no_frags = erx_stat;
@@ -579,7 +576,7 @@ void be_parse_stats(struct be_adapter *adapter)
 }
 
 static struct rtnl_link_stats64 *be_get_stats64(struct net_device *netdev,
-                                       struct rtnl_link_stats64 *stats)
+                                               struct rtnl_link_stats64 *stats)
 {
        struct be_adapter *adapter = netdev_priv(netdev);
        struct be_drv_stats *drvs = &adapter->drv_stats;
@@ -660,7 +657,8 @@ void be_link_status_update(struct be_adapter *adapter, u8 link_status)
 }
 
 static void be_tx_stats_update(struct be_tx_obj *txo,
-                       u32 wrb_cnt, u32 copied, u32 gso_segs, bool stopped)
+                              u32 wrb_cnt, u32 copied, u32 gso_segs,
+                              bool stopped)
 {
        struct be_tx_stats *stats = tx_stats(txo);
 
@@ -676,7 +674,7 @@ static void be_tx_stats_update(struct be_tx_obj *txo,
 
 /* Determine number of WRB entries needed to xmit data in an skb */
 static u32 wrb_cnt_for_skb(struct be_adapter *adapter, struct sk_buff *skb,
-                                                               bool *dummy)
+                          bool *dummy)
 {
        int cnt = (skb->len > skb->data_len);
 
@@ -704,7 +702,7 @@ static inline void wrb_fill(struct be_eth_wrb *wrb, u64 addr, int len)
 }
 
 static inline u16 be_get_tx_vlan_tag(struct be_adapter *adapter,
-                                       struct sk_buff *skb)
+                                    struct sk_buff *skb)
 {
        u8 vlan_prio;
        u16 vlan_tag;
@@ -733,7 +731,8 @@ static u16 skb_ip_proto(struct sk_buff *skb)
 }
 
 static void wrb_fill_hdr(struct be_adapter *adapter, struct be_eth_hdr_wrb *hdr,
-               struct sk_buff *skb, u32 wrb_cnt, u32 len, bool skip_hw_vlan)
+                        struct sk_buff *skb, u32 wrb_cnt, u32 len,
+                        bool skip_hw_vlan)
 {
        u16 vlan_tag, proto;
 
@@ -774,7 +773,7 @@ static void wrb_fill_hdr(struct be_adapter *adapter, struct be_eth_hdr_wrb *hdr,
 }
 
 static void unmap_tx_frag(struct device *dev, struct be_eth_wrb *wrb,
-               bool unmap_single)
+                         bool unmap_single)
 {
        dma_addr_t dma;
 
@@ -791,8 +790,8 @@ static void unmap_tx_frag(struct device *dev, struct be_eth_wrb *wrb,
 }
 
 static int make_tx_wrbs(struct be_adapter *adapter, struct be_queue_info *txq,
-               struct sk_buff *skb, u32 wrb_cnt, bool dummy_wrb,
-               bool skip_hw_vlan)
+                       struct sk_buff *skb, u32 wrb_cnt, bool dummy_wrb,
+                       bool skip_hw_vlan)
 {
        dma_addr_t busaddr;
        int i, copied = 0;
@@ -821,8 +820,7 @@ static int make_tx_wrbs(struct be_adapter *adapter, struct be_queue_info *txq,
        }
 
        for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
-               const struct skb_frag_struct *frag =
-                       &skb_shinfo(skb)->frags[i];
+               const struct skb_frag_struct *frag = &skb_shinfo(skb)->frags[i];
                busaddr = skb_frag_dma_map(dev, frag, 0,
                                           skb_frag_size(frag), DMA_TO_DEVICE);
                if (dma_mapping_error(dev, busaddr))
@@ -927,8 +925,7 @@ static int be_vlan_tag_tx_chk(struct be_adapter *adapter, struct sk_buff *skb)
        return vlan_tx_tag_present(skb) || adapter->pvid || adapter->qnq_vid;
 }
 
-static int be_ipv6_tx_stall_chk(struct be_adapter *adapter,
-                               struct sk_buff *skb)
+static int be_ipv6_tx_stall_chk(struct be_adapter *adapter, struct sk_buff *skb)
 {
        return BE3_chip(adapter) && be_ipv6_exthdr_check(skb);
 }
@@ -959,7 +956,7 @@ static struct sk_buff *be_lancer_xmit_workarounds(struct be_adapter *adapter,
         */
        if (be_pvid_tagging_enabled(adapter) &&
            veh->h_vlan_proto == htons(ETH_P_8021Q))
-                       *skip_hw_vlan = true;
+               *skip_hw_vlan = true;
 
        /* HW has a bug wherein it will calculate CSUM for VLAN
         * pkts even though it is disabled.
@@ -1077,16 +1074,15 @@ static int be_change_mtu(struct net_device *netdev, int new_mtu)
 {
        struct be_adapter *adapter = netdev_priv(netdev);
        if (new_mtu < BE_MIN_MTU ||
-                       new_mtu > (BE_MAX_JUMBO_FRAME_SIZE -
-                                       (ETH_HLEN + ETH_FCS_LEN))) {
+           new_mtu > (BE_MAX_JUMBO_FRAME_SIZE - (ETH_HLEN + ETH_FCS_LEN))) {
                dev_info(&adapter->pdev->dev,
-                       "MTU must be between %d and %d bytes\n",
-                       BE_MIN_MTU,
-                       (BE_MAX_JUMBO_FRAME_SIZE - (ETH_HLEN + ETH_FCS_LEN)));
+                        "MTU must be between %d and %d bytes\n",
+                        BE_MIN_MTU,
+                        (BE_MAX_JUMBO_FRAME_SIZE - (ETH_HLEN + ETH_FCS_LEN)));
                return -EINVAL;
        }
        dev_info(&adapter->pdev->dev, "MTU changed from %d to %d bytes\n",
-                       netdev->mtu, new_mtu);
+                netdev->mtu, new_mtu);
        netdev->mtu = new_mtu;
        return 0;
 }
@@ -1098,7 +1094,7 @@ static int be_change_mtu(struct net_device *netdev, int new_mtu)
 static int be_vid_config(struct be_adapter *adapter)
 {
        u16 vids[BE_NUM_VLANS_SUPPORTED];
-       u16 num = 0, i;
+       u16 num = 0, i = 0;
        int status = 0;
 
        /* No need to further configure vids if in promiscuous mode */
@@ -1109,13 +1105,10 @@ static int be_vid_config(struct be_adapter *adapter)
                goto set_vlan_promisc;
 
        /* Construct VLAN Table to give to HW */
-       for (i = 0; i < VLAN_N_VID; i++)
-               if (adapter->vlan_tag[i])
-                       vids[num++] = cpu_to_le16(i);
-
-       status = be_cmd_vlan_config(adapter, adapter->if_handle,
-                                   vids, num, 0);
+       for_each_set_bit(i, adapter->vids, VLAN_N_VID)
+               vids[num++] = cpu_to_le16(i);
 
+       status = be_cmd_vlan_config(adapter, adapter->if_handle, vids, num);
        if (status) {
                /* Set to VLAN promisc mode as setting VLAN filter failed */
                if (status == MCC_ADDL_STS_INSUFFICIENT_RESOURCES)
@@ -1160,16 +1153,16 @@ static int be_vlan_add_vid(struct net_device *netdev, __be16 proto, u16 vid)
        if (lancer_chip(adapter) && vid == 0)
                return status;
 
-       if (adapter->vlan_tag[vid])
+       if (test_bit(vid, adapter->vids))
                return status;
 
-       adapter->vlan_tag[vid] = 1;
+       set_bit(vid, adapter->vids);
        adapter->vlans_added++;
 
        status = be_vid_config(adapter);
        if (status) {
                adapter->vlans_added--;
-               adapter->vlan_tag[vid] = 0;
+               clear_bit(vid, adapter->vids);
        }
 
        return status;
@@ -1184,12 +1177,12 @@ static int be_vlan_rem_vid(struct net_device *netdev, __be16 proto, u16 vid)
        if (lancer_chip(adapter) && vid == 0)
                goto ret;
 
-       adapter->vlan_tag[vid] = 0;
+       clear_bit(vid, adapter->vids);
        status = be_vid_config(adapter);
        if (!status)
                adapter->vlans_added--;
        else
-               adapter->vlan_tag[vid] = 1;
+               set_bit(vid, adapter->vids);
 ret:
        return status;
 }
@@ -1254,8 +1247,10 @@ static void be_set_rx_mode(struct net_device *netdev)
 
        /* Set to MCAST promisc mode if setting MULTICAST address fails */
        if (status) {
-               dev_info(&adapter->pdev->dev, "Exhausted multicast HW filters.\n");
-               dev_info(&adapter->pdev->dev, "Disabling HW multicast filtering.\n");
+               dev_info(&adapter->pdev->dev,
+                        "Exhausted multicast HW filters.\n");
+               dev_info(&adapter->pdev->dev,
+                        "Disabling HW multicast filtering.\n");
                be_cmd_rx_filter(adapter, IFF_ALLMULTI, ON);
        }
 done:
@@ -1287,7 +1282,7 @@ static int be_set_vf_mac(struct net_device *netdev, int vf, u8 *mac)
 
        if (status)
                dev_err(&adapter->pdev->dev, "MAC %pM set on VF %d Failed\n",
-                               mac, vf);
+                       mac, vf);
        else
                memcpy(vf_cfg->mac_addr, mac, ETH_ALEN);
 
@@ -1295,7 +1290,7 @@ static int be_set_vf_mac(struct net_device *netdev, int vf, u8 *mac)
 }
 
 static int be_get_vf_config(struct net_device *netdev, int vf,
-                       struct ifla_vf_info *vi)
+                           struct ifla_vf_info *vi)
 {
        struct be_adapter *adapter = netdev_priv(netdev);
        struct be_vf_cfg *vf_cfg = &adapter->vf_cfg[vf];
@@ -1316,8 +1311,7 @@ static int be_get_vf_config(struct net_device *netdev, int vf,
        return 0;
 }
 
-static int be_set_vf_vlan(struct net_device *netdev,
-                       int vf, u16 vlan, u8 qos)
+static int be_set_vf_vlan(struct net_device *netdev, int vf, u16 vlan, u8 qos)
 {
        struct be_adapter *adapter = netdev_priv(netdev);
        struct be_vf_cfg *vf_cfg = &adapter->vf_cfg[vf];
@@ -1348,8 +1342,7 @@ static int be_set_vf_vlan(struct net_device *netdev,
        return status;
 }
 
-static int be_set_vf_tx_rate(struct net_device *netdev,
-                       int vf, int rate)
+static int be_set_vf_tx_rate(struct net_device *netdev, int vf, int rate)
 {
        struct be_adapter *adapter = netdev_priv(netdev);
        int status = 0;
@@ -1369,7 +1362,7 @@ static int be_set_vf_tx_rate(struct net_device *netdev,
        status = be_cmd_config_qos(adapter, rate / 10, vf + 1);
        if (status)
                dev_err(&adapter->pdev->dev,
-                               "tx rate %d on VF %d failed\n", rate, vf);
+                       "tx rate %d on VF %d failed\n", rate, vf);
        else
                adapter->vf_cfg[vf].tx_rate = rate;
        return status;
@@ -1469,7 +1462,7 @@ modify_eqd:
 }
 
 static void be_rx_stats_update(struct be_rx_obj *rxo,
-               struct be_rx_compl_info *rxcp)
+                              struct be_rx_compl_info *rxcp)
 {
        struct be_rx_stats *stats = rx_stats(rxo);
 
@@ -1566,7 +1559,8 @@ static void skb_fill_rx_data(struct be_rx_obj *rxo, struct sk_buff *skb,
                skb_frag_set_page(skb, 0, page_info->page);
                skb_shinfo(skb)->frags[0].page_offset =
                                        page_info->page_offset + hdr_len;
-               skb_frag_size_set(&skb_shinfo(skb)->frags[0], curr_frag_len - hdr_len);
+               skb_frag_size_set(&skb_shinfo(skb)->frags[0],
+                                 curr_frag_len - hdr_len);
                skb->data_len = curr_frag_len - hdr_len;
                skb->truesize += rx_frag_size;
                skb->tail += hdr_len;
@@ -1725,8 +1719,8 @@ static void be_parse_rx_compl_v1(struct be_eth_rx_compl *compl,
        if (rxcp->vlanf) {
                rxcp->qnq = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, qnq,
                                          compl);
-               rxcp->vlan_tag = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, vlan_tag,
-                                              compl);
+               rxcp->vlan_tag = AMAP_GET_BITS(struct amap_eth_rx_compl_v1,
+                                              vlan_tag, compl);
        }
        rxcp->port = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, port, compl);
        rxcp->tunneled =
@@ -1757,8 +1751,8 @@ static void be_parse_rx_compl_v0(struct be_eth_rx_compl *compl,
        if (rxcp->vlanf) {
                rxcp->qnq = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, qnq,
                                          compl);
-               rxcp->vlan_tag = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, vlan_tag,
-                                              compl);
+               rxcp->vlan_tag = AMAP_GET_BITS(struct amap_eth_rx_compl_v0,
+                                              vlan_tag, compl);
        }
        rxcp->port = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, port, compl);
        rxcp->ip_frag = AMAP_GET_BITS(struct amap_eth_rx_compl_v0,
@@ -1799,7 +1793,7 @@ static struct be_rx_compl_info *be_rx_compl_get(struct be_rx_obj *rxo)
                        rxcp->vlan_tag = swab16(rxcp->vlan_tag);
 
                if (adapter->pvid == (rxcp->vlan_tag & VLAN_VID_MASK) &&
-                   !adapter->vlan_tag[rxcp->vlan_tag])
+                   !test_bit(rxcp->vlan_tag, adapter->vids))
                        rxcp->vlanf = 0;
        }
 
@@ -1915,7 +1909,7 @@ static struct be_eth_tx_compl *be_tx_compl_get(struct be_queue_info *tx_cq)
 }
 
 static u16 be_tx_compl_process(struct be_adapter *adapter,
-               struct be_tx_obj *txo, u16 last_index)
+                              struct be_tx_obj *txo, u16 last_index)
 {
        struct be_queue_info *txq = &txo->q;
        struct be_eth_wrb *wrb;
@@ -2122,7 +2116,7 @@ static int be_evt_queues_create(struct be_adapter *adapter)
 
                eq = &eqo->q;
                rc = be_queue_alloc(adapter, eq, EVNT_Q_LEN,
-                                       sizeof(struct be_eq_entry));
+                                   sizeof(struct be_eq_entry));
                if (rc)
                        return rc;
 
@@ -2155,7 +2149,7 @@ static int be_mcc_queues_create(struct be_adapter *adapter)
 
        cq = &adapter->mcc_obj.cq;
        if (be_queue_alloc(adapter, cq, MCC_CQ_LEN,
-                       sizeof(struct be_mcc_compl)))
+                          sizeof(struct be_mcc_compl)))
                goto err;
 
        /* Use the default EQ for MCC completions */
@@ -2275,7 +2269,7 @@ static int be_rx_cqs_create(struct be_adapter *adapter)
                rxo->adapter = adapter;
                cq = &rxo->cq;
                rc = be_queue_alloc(adapter, cq, RX_CQ_LEN,
-                               sizeof(struct be_eth_rx_compl));
+                                   sizeof(struct be_eth_rx_compl));
                if (rc)
                        return rc;
 
@@ -2339,7 +2333,7 @@ static inline bool do_gro(struct be_rx_compl_info *rxcp)
 }
 
 static int be_process_rx(struct be_rx_obj *rxo, struct napi_struct *napi,
-                       int budget, int polling)
+                        int budget, int polling)
 {
        struct be_adapter *adapter = rxo->adapter;
        struct be_queue_info *rx_cq = &rxo->cq;
@@ -2365,7 +2359,7 @@ static int be_process_rx(struct be_rx_obj *rxo, struct napi_struct *napi,
                 * promiscuous mode on some skews
                 */
                if (unlikely(rxcp->port != adapter->port_num &&
-                               !lancer_chip(adapter))) {
+                            !lancer_chip(adapter))) {
                        be_rx_compl_discard(rxo, rxcp);
                        goto loop_continue;
                }
@@ -2405,8 +2399,9 @@ static bool be_process_tx(struct be_adapter *adapter, struct be_tx_obj *txo,
                if (!txcp)
                        break;
                num_wrbs += be_tx_compl_process(adapter, txo,
-                               AMAP_GET_BITS(struct amap_eth_tx_compl,
-                                       wrb_index, txcp));
+                                               AMAP_GET_BITS(struct
+                                                             amap_eth_tx_compl,
+                                                             wrb_index, txcp));
        }
 
        if (work_done) {
@@ -2416,7 +2411,7 @@ static bool be_process_tx(struct be_adapter *adapter, struct be_tx_obj *txo,
                /* As Tx wrbs have been freed up, wake up netdev queue
                 * if it was stopped due to lack of tx wrbs.  */
                if (__netif_subqueue_stopped(adapter->netdev, idx) &&
-                       atomic_read(&txo->q.used) < txo->q.len / 2) {
+                   atomic_read(&txo->q.used) < txo->q.len / 2) {
                        netif_wake_subqueue(adapter->netdev, idx);
                }
 
@@ -2510,9 +2505,9 @@ void be_detect_error(struct be_adapter *adapter)
                sliport_status = ioread32(adapter->db + SLIPORT_STATUS_OFFSET);
                if (sliport_status & SLIPORT_STATUS_ERR_MASK) {
                        sliport_err1 = ioread32(adapter->db +
-                                       SLIPORT_ERROR1_OFFSET);
+                                               SLIPORT_ERROR1_OFFSET);
                        sliport_err2 = ioread32(adapter->db +
-                                       SLIPORT_ERROR2_OFFSET);
+                                               SLIPORT_ERROR2_OFFSET);
                        adapter->hw_error = true;
                        /* Do not log error messages if its a FW reset */
                        if (sliport_err1 == SLIPORT_ERROR_FW_RESET1 &&
@@ -2531,13 +2526,13 @@ void be_detect_error(struct be_adapter *adapter)
                }
        } else {
                pci_read_config_dword(adapter->pdev,
-                               PCICFG_UE_STATUS_LOW, &ue_lo);
+                                     PCICFG_UE_STATUS_LOW, &ue_lo);
                pci_read_config_dword(adapter->pdev,
-                               PCICFG_UE_STATUS_HIGH, &ue_hi);
+                                     PCICFG_UE_STATUS_HIGH, &ue_hi);
                pci_read_config_dword(adapter->pdev,
-                               PCICFG_UE_STATUS_LOW_MASK, &ue_lo_mask);
+                                     PCICFG_UE_STATUS_LOW_MASK, &ue_lo_mask);
                pci_read_config_dword(adapter->pdev,
-                               PCICFG_UE_STATUS_HI_MASK, &ue_hi_mask);
+                                     PCICFG_UE_STATUS_HI_MASK, &ue_hi_mask);
 
                ue_lo = (ue_lo & ~ue_lo_mask);
                ue_hi = (ue_hi & ~ue_hi_mask);
@@ -2624,7 +2619,7 @@ fail:
 }
 
 static inline int be_msix_vec_get(struct be_adapter *adapter,
-                               struct be_eq_obj *eqo)
+                                 struct be_eq_obj *eqo)
 {
        return adapter->msix_entries[eqo->msix_idx].vector;
 }
@@ -2648,7 +2643,7 @@ err_msix:
        for (i--, eqo = &adapter->eq_obj[i]; i >= 0; i--, eqo--)
                free_irq(be_msix_vec_get(adapter, eqo), eqo);
        dev_warn(&adapter->pdev->dev, "MSIX Request IRQ failed - err %d\n",
-               status);
+                status);
        be_msix_disable(adapter);
        return status;
 }
@@ -2774,7 +2769,8 @@ static int be_rx_qs_create(struct be_adapter *adapter)
 {
        struct be_rx_obj *rxo;
        int rc, i, j;
-       u8 rsstable[128];
+       u8 rss_hkey[RSS_HASH_KEY_LEN];
+       struct rss_info *rss = &adapter->rss_info;
 
        for_all_rx_queues(adapter, rxo, i) {
                rc = be_queue_alloc(adapter, &rxo->q, RX_Q_LEN,
@@ -2799,31 +2795,36 @@ static int be_rx_qs_create(struct be_adapter *adapter)
        }
 
        if (be_multi_rxq(adapter)) {
-               for (j = 0; j < 128; j += adapter->num_rx_qs - 1) {
+               for (j = 0; j < RSS_INDIR_TABLE_LEN;
+                       j += adapter->num_rx_qs - 1) {
                        for_all_rss_queues(adapter, rxo, i) {
-                               if ((j + i) >= 128)
+                               if ((j + i) >= RSS_INDIR_TABLE_LEN)
                                        break;
-                               rsstable[j + i] = rxo->rss_id;
+                               rss->rsstable[j + i] = rxo->rss_id;
+                               rss->rss_queue[j + i] = i;
                        }
                }
-               adapter->rss_flags = RSS_ENABLE_TCP_IPV4 | RSS_ENABLE_IPV4 |
-                                       RSS_ENABLE_TCP_IPV6 | RSS_ENABLE_IPV6;
+               rss->rss_flags = RSS_ENABLE_TCP_IPV4 | RSS_ENABLE_IPV4 |
+                       RSS_ENABLE_TCP_IPV6 | RSS_ENABLE_IPV6;
 
                if (!BEx_chip(adapter))
-                       adapter->rss_flags |= RSS_ENABLE_UDP_IPV4 |
-                                               RSS_ENABLE_UDP_IPV6;
+                       rss->rss_flags |= RSS_ENABLE_UDP_IPV4 |
+                               RSS_ENABLE_UDP_IPV6;
        } else {
                /* Disable RSS, if only default RX Q is created */
-               adapter->rss_flags = RSS_ENABLE_NONE;
+               rss->rss_flags = RSS_ENABLE_NONE;
        }
 
-       rc = be_cmd_rss_config(adapter, rsstable, adapter->rss_flags,
-                              128);
+       get_random_bytes(rss_hkey, RSS_HASH_KEY_LEN);
+       rc = be_cmd_rss_config(adapter, rss->rsstable, rss->rss_flags,
+                              128, rss_hkey);
        if (rc) {
-               adapter->rss_flags = RSS_ENABLE_NONE;
+               rss->rss_flags = RSS_ENABLE_NONE;
                return rc;
        }
 
+       memcpy(rss->rss_hkey, rss_hkey, RSS_HASH_KEY_LEN);
+
        /* First time posting */
        for_all_rx_queues(adapter, rxo, i)
                be_post_rx_frags(rxo, GFP_KERNEL);
@@ -2896,7 +2897,8 @@ static int be_setup_wol(struct be_adapter *adapter, bool enable)
 
        if (enable) {
                status = pci_write_config_dword(adapter->pdev,
-                       PCICFG_PM_CONTROL_OFFSET, PCICFG_PM_CONTROL_MASK);
+                                               PCICFG_PM_CONTROL_OFFSET,
+                                               PCICFG_PM_CONTROL_MASK);
                if (status) {
                        dev_err(&adapter->pdev->dev,
                                "Could not enable Wake-on-lan\n");
@@ -2905,7 +2907,8 @@ static int be_setup_wol(struct be_adapter *adapter, bool enable)
                        return status;
                }
                status = be_cmd_enable_magic_wol(adapter,
-                               adapter->netdev->dev_addr, &cmd);
+                                                adapter->netdev->dev_addr,
+                                                &cmd);
                pci_enable_wake(adapter->pdev, PCI_D3hot, 1);
                pci_enable_wake(adapter->pdev, PCI_D3cold, 1);
        } else {
@@ -2944,7 +2947,8 @@ static int be_vf_eth_addr_config(struct be_adapter *adapter)
 
                if (status)
                        dev_err(&adapter->pdev->dev,
-                       "Mac address assignment failed for VF %d\n", vf);
+                               "Mac address assignment failed for VF %d\n",
+                               vf);
                else
                        memcpy(vf_cfg->mac_addr, mac, ETH_ALEN);
 
@@ -3086,9 +3090,11 @@ static int be_vfs_if_create(struct be_adapter *adapter)
 
                /* If a FW profile exists, then cap_flags are updated */
                en_flags = cap_flags & (BE_IF_FLAGS_UNTAGGED |
-                          BE_IF_FLAGS_BROADCAST | BE_IF_FLAGS_MULTICAST);
-               status = be_cmd_if_create(adapter, cap_flags, en_flags,
-                                         &vf_cfg->if_handle, vf + 1);
+                                       BE_IF_FLAGS_BROADCAST |
+                                       BE_IF_FLAGS_MULTICAST);
+               status =
+                   be_cmd_if_create(adapter, cap_flags, en_flags,
+                                    &vf_cfg->if_handle, vf + 1);
                if (status)
                        goto err;
        }
@@ -3594,8 +3600,8 @@ static void be_netpoll(struct net_device *netdev)
 static char flash_cookie[2][16] =      {"*** SE FLAS", "H DIRECTORY *** "};
 
 static bool be_flash_redboot(struct be_adapter *adapter,
-                       const u8 *p, u32 img_start, int image_size,
-                       int hdr_size)
+                            const u8 *p, u32 img_start, int image_size,
+                            int hdr_size)
 {
        u32 crc_offset;
        u8 flashed_crc[4];
@@ -3605,11 +3611,10 @@ static bool be_flash_redboot(struct be_adapter *adapter,
 
        p += crc_offset;
 
-       status = be_cmd_get_flash_crc(adapter, flashed_crc,
-                       (image_size - 4));
+       status = be_cmd_get_flash_crc(adapter, flashed_crc, (image_size - 4));
        if (status) {
                dev_err(&adapter->pdev->dev,
-               "could not get crc from flash, not flashing redboot\n");
+                       "could not get crc from flash, not flashing redboot\n");
                return false;
        }
 
@@ -3649,8 +3654,8 @@ static bool is_comp_in_ufi(struct be_adapter *adapter,
 }
 
 static struct flash_section_info *get_fsec_info(struct be_adapter *adapter,
-                                        int header_size,
-                                        const struct firmware *fw)
+                                               int header_size,
+                                               const struct firmware *fw)
 {
        struct flash_section_info *fsec = NULL;
        const u8 *p = fw->data;
@@ -3666,7 +3671,7 @@ static struct flash_section_info *get_fsec_info(struct be_adapter *adapter,
 }
 
 static int be_flash(struct be_adapter *adapter, const u8 *img,
-               struct be_dma_mem *flash_cmd, int optype, int img_size)
+                   struct be_dma_mem *flash_cmd, int optype, int img_size)
 {
        u32 total_bytes = 0, flash_op, num_bytes = 0;
        int status = 0;
@@ -3693,7 +3698,7 @@ static int be_flash(struct be_adapter *adapter, const u8 *img,
                memcpy(req->data_buf, img, num_bytes);
                img += num_bytes;
                status = be_cmd_write_flashrom(adapter, flash_cmd, optype,
-                                               flash_op, num_bytes);
+                                              flash_op, num_bytes);
                if (status) {
                        if (status == ILLEGAL_IOCTL_REQ &&
                            optype == OPTYPE_PHY_FW)
@@ -3708,10 +3713,8 @@ static int be_flash(struct be_adapter *adapter, const u8 *img,
 
 /* For BE2, BE3 and BE3-R */
 static int be_flash_BEx(struct be_adapter *adapter,
-                        const struct firmware *fw,
-                        struct be_dma_mem *flash_cmd,
-                        int num_of_images)
-
+                       const struct firmware *fw,
+                       struct be_dma_mem *flash_cmd, int num_of_images)
 {
        int status = 0, i, filehdr_size = 0;
        int img_hdrs_size = (num_of_images * sizeof(struct image_hdr));
@@ -3793,8 +3796,10 @@ static int be_flash_BEx(struct be_adapter *adapter,
 
                if (pflashcomp[i].optype == OPTYPE_REDBOOT) {
                        redboot = be_flash_redboot(adapter, fw->data,
-                               pflashcomp[i].offset, pflashcomp[i].size,
-                               filehdr_size + img_hdrs_size);
+                                                  pflashcomp[i].offset,
+                                                  pflashcomp[i].size,
+                                                  filehdr_size +
+                                                  img_hdrs_size);
                        if (!redboot)
                                continue;
                }
@@ -3805,7 +3810,7 @@ static int be_flash_BEx(struct be_adapter *adapter,
                        return -1;
 
                status = be_flash(adapter, p, flash_cmd, pflashcomp[i].optype,
-                                       pflashcomp[i].size);
+                                 pflashcomp[i].size);
                if (status) {
                        dev_err(&adapter->pdev->dev,
                                "Flashing section type %d failed.\n",
@@ -3817,8 +3822,8 @@ static int be_flash_BEx(struct be_adapter *adapter,
 }
 
 static int be_flash_skyhawk(struct be_adapter *adapter,
-               const struct firmware *fw,
-               struct be_dma_mem *flash_cmd, int num_of_images)
+                           const struct firmware *fw,
+                           struct be_dma_mem *flash_cmd, int num_of_images)
 {
        int status = 0, i, filehdr_size = 0;
        int img_offset, img_size, img_optype, redboot;
@@ -3866,8 +3871,9 @@ static int be_flash_skyhawk(struct be_adapter *adapter,
 
                if (img_optype == OPTYPE_REDBOOT) {
                        redboot = be_flash_redboot(adapter, fw->data,
-                                       img_offset, img_size,
-                                       filehdr_size + img_hdrs_size);
+                                                  img_offset, img_size,
+                                                  filehdr_size +
+                                                  img_hdrs_size);
                        if (!redboot)
                                continue;
                }
@@ -3889,7 +3895,7 @@ static int be_flash_skyhawk(struct be_adapter *adapter,
 }
 
 static int lancer_fw_download(struct be_adapter *adapter,
-                               const struct firmware *fw)
+                             const struct firmware *fw)
 {
 #define LANCER_FW_DOWNLOAD_CHUNK      (32 * 1024)
 #define LANCER_FW_DOWNLOAD_LOCATION   "/prg"
@@ -3955,7 +3961,7 @@ static int lancer_fw_download(struct be_adapter *adapter,
        }
 
        dma_free_coherent(&adapter->pdev->dev, flash_cmd.size, flash_cmd.va,
-                               flash_cmd.dma);
+                         flash_cmd.dma);
        if (status) {
                dev_err(&adapter->pdev->dev,
                        "Firmware load error. "
@@ -3976,9 +3982,8 @@ static int lancer_fw_download(struct be_adapter *adapter,
                        goto lancer_fw_exit;
                }
        } else if (change_status != LANCER_NO_RESET_NEEDED) {
-                       dev_err(&adapter->pdev->dev,
-                               "System reboot required for new FW"
-                               " to be active\n");
+               dev_err(&adapter->pdev->dev,
+                       "System reboot required for new FW to be active\n");
        }
 
        dev_info(&adapter->pdev->dev, "Firmware flashed successfully\n");
@@ -4042,7 +4047,7 @@ static int be_fw_download(struct be_adapter *adapter, const struct firmware* fw)
                        switch (ufi_type) {
                        case UFI_TYPE4:
                                status = be_flash_skyhawk(adapter, fw,
-                                                       &flash_cmd, num_imgs);
+                                                         &flash_cmd, num_imgs);
                                break;
                        case UFI_TYPE3R:
                                status = be_flash_BEx(adapter, fw, &flash_cmd,
@@ -4112,8 +4117,7 @@ fw_exit:
        return status;
 }
 
-static int be_ndo_bridge_setlink(struct net_device *dev,
-                                   struct nlmsghdr *nlh)
+static int be_ndo_bridge_setlink(struct net_device *dev, struct nlmsghdr *nlh)
 {
        struct be_adapter *adapter = netdev_priv(dev);
        struct nlattr *attr, *br_spec;
@@ -4155,8 +4159,7 @@ err:
 }
 
 static int be_ndo_bridge_getlink(struct sk_buff *skb, u32 pid, u32 seq,
-                                   struct net_device *dev,
-                                   u32 filter_mask)
+                                struct net_device *dev, u32 filter_mask)
 {
        struct be_adapter *adapter = netdev_priv(dev);
        int status = 0;
@@ -4301,7 +4304,7 @@ static void be_netdev_init(struct net_device *netdev)
 
        netdev->netdev_ops = &be_netdev_ops;
 
-       SET_ETHTOOL_OPS(netdev, &be_ethtool_ops);
+       netdev->ethtool_ops = &be_ethtool_ops;
 }
 
 static void be_unmap_pci_bars(struct be_adapter *adapter)
@@ -4870,7 +4873,7 @@ static void be_shutdown(struct pci_dev *pdev)
 }
 
 static pci_ers_result_t be_eeh_err_detected(struct pci_dev *pdev,
-                               pci_channel_state_t state)
+                                           pci_channel_state_t state)
 {
        struct be_adapter *adapter = pci_get_drvdata(pdev);
        struct net_device *netdev =  adapter->netdev;
index 68069eabc4f855c0636d12b69854e44919ffb863..c77fa4a6984458648a97960deeef2d0a54b6637c 100644 (file)
@@ -1210,7 +1210,7 @@ static int ftgmac100_probe(struct platform_device *pdev)
 
        SET_NETDEV_DEV(netdev, &pdev->dev);
 
-       SET_ETHTOOL_OPS(netdev, &ftgmac100_ethtool_ops);
+       netdev->ethtool_ops = &ftgmac100_ethtool_ops;
        netdev->netdev_ops = &ftgmac100_netdev_ops;
        netdev->features = NETIF_F_IP_CSUM | NETIF_F_GRO;
 
index 8be5b40c0a121331f1d1bbc0712499eb4bec8160..4ff1adc6bfcab9132b1dbf673626f868430bbcc3 100644 (file)
@@ -1085,7 +1085,7 @@ static int ftmac100_probe(struct platform_device *pdev)
        }
 
        SET_NETDEV_DEV(netdev, &pdev->dev);
-       SET_ETHTOOL_OPS(netdev, &ftmac100_ethtool_ops);
+       netdev->ethtool_ops = &ftmac100_ethtool_ops;
        netdev->netdev_ops = &ftmac100_netdev_ops;
 
        platform_set_drvdata(pdev, netdev);
index 8d69e439f0c518d4b3e46c9ae21d85e4013b7e06..cb5c987bee3932d924210094a0c5d0518b153aaa 100644 (file)
@@ -1255,6 +1255,49 @@ static int fec_enet_mdio_write(struct mii_bus *bus, int mii_id, int regnum,
        return 0;
 }
 
+static int fec_enet_clk_enable(struct net_device *ndev, bool enable)
+{
+       struct fec_enet_private *fep = netdev_priv(ndev);
+       int ret;
+
+       if (enable) {
+               ret = clk_prepare_enable(fep->clk_ahb);
+               if (ret)
+                       return ret;
+               ret = clk_prepare_enable(fep->clk_ipg);
+               if (ret)
+                       goto failed_clk_ipg;
+               if (fep->clk_enet_out) {
+                       ret = clk_prepare_enable(fep->clk_enet_out);
+                       if (ret)
+                               goto failed_clk_enet_out;
+               }
+               if (fep->clk_ptp) {
+                       ret = clk_prepare_enable(fep->clk_ptp);
+                       if (ret)
+                               goto failed_clk_ptp;
+               }
+       } else {
+               clk_disable_unprepare(fep->clk_ahb);
+               clk_disable_unprepare(fep->clk_ipg);
+               if (fep->clk_enet_out)
+                       clk_disable_unprepare(fep->clk_enet_out);
+               if (fep->clk_ptp)
+                       clk_disable_unprepare(fep->clk_ptp);
+       }
+
+       return 0;
+failed_clk_ptp:
+       if (fep->clk_enet_out)
+               clk_disable_unprepare(fep->clk_enet_out);
+failed_clk_enet_out:
+               clk_disable_unprepare(fep->clk_ipg);
+failed_clk_ipg:
+               clk_disable_unprepare(fep->clk_ahb);
+
+       return ret;
+}
+
 static int fec_enet_mii_probe(struct net_device *ndev)
 {
        struct fec_enet_private *fep = netdev_priv(ndev);
@@ -1364,7 +1407,7 @@ static int fec_enet_mii_init(struct platform_device *pdev)
         * Reference Manual has an error on this, and gets fixed on i.MX6Q
         * document.
         */
-       fep->phy_speed = DIV_ROUND_UP(clk_get_rate(fep->clk_ahb), 5000000);
+       fep->phy_speed = DIV_ROUND_UP(clk_get_rate(fep->clk_ipg), 5000000);
        if (id_entry->driver_data & FEC_QUIRK_ENET_MAC)
                fep->phy_speed--;
        fep->phy_speed <<= 1;
@@ -1773,6 +1816,10 @@ fec_enet_open(struct net_device *ndev)
        struct fec_enet_private *fep = netdev_priv(ndev);
        int ret;
 
+       ret = fec_enet_clk_enable(ndev, true);
+       if (ret)
+               return ret;
+
        /* I should reset the ring buffers here, but I don't yet know
         * a simple way to do that.
         */
@@ -1811,6 +1858,7 @@ fec_enet_close(struct net_device *ndev)
                phy_disconnect(fep->phy_dev);
        }
 
+       fec_enet_clk_enable(ndev, false);
        fec_enet_free_buffers(ndev);
 
        return 0;
@@ -2164,26 +2212,10 @@ fec_probe(struct platform_device *pdev)
                fep->bufdesc_ex = 0;
        }
 
-       ret = clk_prepare_enable(fep->clk_ahb);
+       ret = fec_enet_clk_enable(ndev, true);
        if (ret)
                goto failed_clk;
 
-       ret = clk_prepare_enable(fep->clk_ipg);
-       if (ret)
-               goto failed_clk_ipg;
-
-       if (fep->clk_enet_out) {
-               ret = clk_prepare_enable(fep->clk_enet_out);
-               if (ret)
-                       goto failed_clk_enet_out;
-       }
-
-       if (fep->clk_ptp) {
-               ret = clk_prepare_enable(fep->clk_ptp);
-               if (ret)
-                       goto failed_clk_ptp;
-       }
-
        fep->reg_phy = devm_regulator_get(&pdev->dev, "phy");
        if (!IS_ERR(fep->reg_phy)) {
                ret = regulator_enable(fep->reg_phy);
@@ -2225,6 +2257,7 @@ fec_probe(struct platform_device *pdev)
 
        /* Carrier starts down, phylib will bring it up */
        netif_carrier_off(ndev);
+       fec_enet_clk_enable(ndev, false);
 
        ret = register_netdev(ndev);
        if (ret)
@@ -2244,15 +2277,7 @@ failed_init:
        if (fep->reg_phy)
                regulator_disable(fep->reg_phy);
 failed_regulator:
-       if (fep->clk_ptp)
-               clk_disable_unprepare(fep->clk_ptp);
-failed_clk_ptp:
-       if (fep->clk_enet_out)
-               clk_disable_unprepare(fep->clk_enet_out);
-failed_clk_enet_out:
-       clk_disable_unprepare(fep->clk_ipg);
-failed_clk_ipg:
-       clk_disable_unprepare(fep->clk_ahb);
+       fec_enet_clk_enable(ndev, false);
 failed_clk:
 failed_ioremap:
        free_netdev(ndev);
@@ -2272,14 +2297,9 @@ fec_drv_remove(struct platform_device *pdev)
        del_timer_sync(&fep->time_keep);
        if (fep->reg_phy)
                regulator_disable(fep->reg_phy);
-       if (fep->clk_ptp)
-               clk_disable_unprepare(fep->clk_ptp);
        if (fep->ptp_clock)
                ptp_clock_unregister(fep->ptp_clock);
-       if (fep->clk_enet_out)
-               clk_disable_unprepare(fep->clk_enet_out);
-       clk_disable_unprepare(fep->clk_ipg);
-       clk_disable_unprepare(fep->clk_ahb);
+       fec_enet_clk_enable(ndev, false);
        free_netdev(ndev);
 
        return 0;
@@ -2296,12 +2316,7 @@ fec_suspend(struct device *dev)
                fec_stop(ndev);
                netif_device_detach(ndev);
        }
-       if (fep->clk_ptp)
-               clk_disable_unprepare(fep->clk_ptp);
-       if (fep->clk_enet_out)
-               clk_disable_unprepare(fep->clk_enet_out);
-       clk_disable_unprepare(fep->clk_ipg);
-       clk_disable_unprepare(fep->clk_ahb);
+       fec_enet_clk_enable(ndev, false);
 
        if (fep->reg_phy)
                regulator_disable(fep->reg_phy);
@@ -2322,25 +2337,9 @@ fec_resume(struct device *dev)
                        return ret;
        }
 
-       ret = clk_prepare_enable(fep->clk_ahb);
-       if (ret)
-               goto failed_clk_ahb;
-
-       ret = clk_prepare_enable(fep->clk_ipg);
+       ret = fec_enet_clk_enable(ndev, true);
        if (ret)
-               goto failed_clk_ipg;
-
-       if (fep->clk_enet_out) {
-               ret = clk_prepare_enable(fep->clk_enet_out);
-               if (ret)
-                       goto failed_clk_enet_out;
-       }
-
-       if (fep->clk_ptp) {
-               ret = clk_prepare_enable(fep->clk_ptp);
-               if (ret)
-                       goto failed_clk_ptp;
-       }
+               goto failed_clk;
 
        if (netif_running(ndev)) {
                fec_restart(ndev, fep->full_duplex);
@@ -2349,14 +2348,7 @@ fec_resume(struct device *dev)
 
        return 0;
 
-failed_clk_ptp:
-       if (fep->clk_enet_out)
-               clk_disable_unprepare(fep->clk_enet_out);
-failed_clk_enet_out:
-       clk_disable_unprepare(fep->clk_ipg);
-failed_clk_ipg:
-       clk_disable_unprepare(fep->clk_ahb);
-failed_clk_ahb:
+failed_clk:
        if (fep->reg_phy)
                regulator_disable(fep->reg_phy);
        return ret;
index dc80db41d6b3397388b0210283c4c7fd3ce07680..cfaf17b70f3fc5d6ab7a11a81266d67267646f33 100644 (file)
@@ -791,10 +791,6 @@ static int fs_init_phy(struct net_device *dev)
 
        phydev = of_phy_connect(dev, fep->fpi->phy_node, &fs_adjust_link, 0,
                                iface);
-       if (!phydev) {
-               phydev = of_phy_connect_fixed_link(dev, &fs_adjust_link,
-                                                  iface);
-       }
        if (!phydev) {
                dev_err(&dev->dev, "Could not attach to PHY\n");
                return -ENODEV;
@@ -1029,9 +1025,16 @@ static int fs_enet_probe(struct platform_device *ofdev)
        fpi->use_napi = 1;
        fpi->napi_weight = 17;
        fpi->phy_node = of_parse_phandle(ofdev->dev.of_node, "phy-handle", 0);
-       if ((!fpi->phy_node) && (!of_get_property(ofdev->dev.of_node, "fixed-link",
-                                                 NULL)))
-               goto out_free_fpi;
+       if (!fpi->phy_node && of_phy_is_fixed_link(ofdev->dev.of_node)) {
+               err = of_phy_register_fixed_link(ofdev->dev.of_node);
+               if (err)
+                       goto out_free_fpi;
+
+               /* In the case of a fixed PHY, the DT node associated
+                * to the PHY is the Ethernet MAC DT node.
+                */
+               fpi->phy_node = ofdev->dev.of_node;
+       }
 
        if (of_device_is_compatible(ofdev->dev.of_node, "fsl,mpc5125-fec")) {
                phy_connection_type = of_get_property(ofdev->dev.of_node,
index 9125d9abf0998d31e3179bd9c712af487855d5a9..282674027c92b012aedd35d980b3e69a4b96fd4a 100644 (file)
@@ -121,6 +121,7 @@ static irqreturn_t gfar_error(int irq, void *dev_id);
 static irqreturn_t gfar_transmit(int irq, void *dev_id);
 static irqreturn_t gfar_interrupt(int irq, void *dev_id);
 static void adjust_link(struct net_device *dev);
+static noinline void gfar_update_link_state(struct gfar_private *priv);
 static int init_phy(struct net_device *dev);
 static int gfar_probe(struct platform_device *ofdev);
 static int gfar_remove(struct platform_device *ofdev);
@@ -888,6 +889,17 @@ static int gfar_of_init(struct platform_device *ofdev, struct net_device **pdev)
 
        priv->phy_node = of_parse_phandle(np, "phy-handle", 0);
 
+       /* In the case of a fixed PHY, the DT node associated
+        * to the PHY is the Ethernet MAC DT node.
+        */
+       if (of_phy_is_fixed_link(np)) {
+               err = of_phy_register_fixed_link(np);
+               if (err)
+                       goto err_grp_init;
+
+               priv->phy_node = np;
+       }
+
        /* Find the TBI PHY.  If it's not there, we don't support SGMII */
        priv->tbi_node = of_parse_phandle(np, "tbi-handle", 0);
 
@@ -1659,9 +1671,6 @@ static int init_phy(struct net_device *dev)
 
        priv->phydev = of_phy_connect(dev, priv->phy_node, &adjust_link, 0,
                                      interface);
-       if (!priv->phydev)
-               priv->phydev = of_phy_connect_fixed_link(dev, &adjust_link,
-                                                        interface);
        if (!priv->phydev) {
                dev_err(&dev->dev, "could not attach to PHY\n");
                return -ENODEV;
@@ -3076,41 +3085,6 @@ static irqreturn_t gfar_interrupt(int irq, void *grp_id)
        return IRQ_HANDLED;
 }
 
-static u32 gfar_get_flowctrl_cfg(struct gfar_private *priv)
-{
-       struct phy_device *phydev = priv->phydev;
-       u32 val = 0;
-
-       if (!phydev->duplex)
-               return val;
-
-       if (!priv->pause_aneg_en) {
-               if (priv->tx_pause_en)
-                       val |= MACCFG1_TX_FLOW;
-               if (priv->rx_pause_en)
-                       val |= MACCFG1_RX_FLOW;
-       } else {
-               u16 lcl_adv, rmt_adv;
-               u8 flowctrl;
-               /* get link partner capabilities */
-               rmt_adv = 0;
-               if (phydev->pause)
-                       rmt_adv = LPA_PAUSE_CAP;
-               if (phydev->asym_pause)
-                       rmt_adv |= LPA_PAUSE_ASYM;
-
-               lcl_adv = mii_advertise_flowctrl(phydev->advertising);
-
-               flowctrl = mii_resolve_flowctrl_fdx(lcl_adv, rmt_adv);
-               if (flowctrl & FLOW_CTRL_TX)
-                       val |= MACCFG1_TX_FLOW;
-               if (flowctrl & FLOW_CTRL_RX)
-                       val |= MACCFG1_RX_FLOW;
-       }
-
-       return val;
-}
-
 /* Called every time the controller might need to be made
  * aware of new link state.  The PHY code conveys this
  * information through variables in the phydev structure, and this
@@ -3120,83 +3094,12 @@ static u32 gfar_get_flowctrl_cfg(struct gfar_private *priv)
 static void adjust_link(struct net_device *dev)
 {
        struct gfar_private *priv = netdev_priv(dev);
-       struct gfar __iomem *regs = priv->gfargrp[0].regs;
        struct phy_device *phydev = priv->phydev;
-       int new_state = 0;
-
-       if (test_bit(GFAR_RESETTING, &priv->state))
-               return;
-
-       if (phydev->link) {
-               u32 tempval1 = gfar_read(&regs->maccfg1);
-               u32 tempval = gfar_read(&regs->maccfg2);
-               u32 ecntrl = gfar_read(&regs->ecntrl);
-
-               /* Now we make sure that we can be in full duplex mode.
-                * If not, we operate in half-duplex mode.
-                */
-               if (phydev->duplex != priv->oldduplex) {
-                       new_state = 1;
-                       if (!(phydev->duplex))
-                               tempval &= ~(MACCFG2_FULL_DUPLEX);
-                       else
-                               tempval |= MACCFG2_FULL_DUPLEX;
-
-                       priv->oldduplex = phydev->duplex;
-               }
-
-               if (phydev->speed != priv->oldspeed) {
-                       new_state = 1;
-                       switch (phydev->speed) {
-                       case 1000:
-                               tempval =
-                                   ((tempval & ~(MACCFG2_IF)) | MACCFG2_GMII);
-
-                               ecntrl &= ~(ECNTRL_R100);
-                               break;
-                       case 100:
-                       case 10:
-                               tempval =
-                                   ((tempval & ~(MACCFG2_IF)) | MACCFG2_MII);
-
-                               /* Reduced mode distinguishes
-                                * between 10 and 100
-                                */
-                               if (phydev->speed == SPEED_100)
-                                       ecntrl |= ECNTRL_R100;
-                               else
-                                       ecntrl &= ~(ECNTRL_R100);
-                               break;
-                       default:
-                               netif_warn(priv, link, dev,
-                                          "Ack!  Speed (%d) is not 10/100/1000!\n",
-                                          phydev->speed);
-                               break;
-                       }
-
-                       priv->oldspeed = phydev->speed;
-               }
-
-               tempval1 &= ~(MACCFG1_TX_FLOW | MACCFG1_RX_FLOW);
-               tempval1 |= gfar_get_flowctrl_cfg(priv);
-
-               gfar_write(&regs->maccfg1, tempval1);
-               gfar_write(&regs->maccfg2, tempval);
-               gfar_write(&regs->ecntrl, ecntrl);
-
-               if (!priv->oldlink) {
-                       new_state = 1;
-                       priv->oldlink = 1;
-               }
-       } else if (priv->oldlink) {
-               new_state = 1;
-               priv->oldlink = 0;
-               priv->oldspeed = 0;
-               priv->oldduplex = -1;
-       }
 
-       if (new_state && netif_msg_link(priv))
-               phy_print_status(phydev);
+       if (unlikely(phydev->link != priv->oldlink ||
+                    phydev->duplex != priv->oldduplex ||
+                    phydev->speed != priv->oldspeed))
+               gfar_update_link_state(priv);
 }
 
 /* Update the hash table based on the current list of multicast
@@ -3442,6 +3345,114 @@ static irqreturn_t gfar_error(int irq, void *grp_id)
        return IRQ_HANDLED;
 }
 
+static u32 gfar_get_flowctrl_cfg(struct gfar_private *priv)
+{
+       struct phy_device *phydev = priv->phydev;
+       u32 val = 0;
+
+       if (!phydev->duplex)
+               return val;
+
+       if (!priv->pause_aneg_en) {
+               if (priv->tx_pause_en)
+                       val |= MACCFG1_TX_FLOW;
+               if (priv->rx_pause_en)
+                       val |= MACCFG1_RX_FLOW;
+       } else {
+               u16 lcl_adv, rmt_adv;
+               u8 flowctrl;
+               /* get link partner capabilities */
+               rmt_adv = 0;
+               if (phydev->pause)
+                       rmt_adv = LPA_PAUSE_CAP;
+               if (phydev->asym_pause)
+                       rmt_adv |= LPA_PAUSE_ASYM;
+
+               lcl_adv = mii_advertise_flowctrl(phydev->advertising);
+
+               flowctrl = mii_resolve_flowctrl_fdx(lcl_adv, rmt_adv);
+               if (flowctrl & FLOW_CTRL_TX)
+                       val |= MACCFG1_TX_FLOW;
+               if (flowctrl & FLOW_CTRL_RX)
+                       val |= MACCFG1_RX_FLOW;
+       }
+
+       return val;
+}
+
+static noinline void gfar_update_link_state(struct gfar_private *priv)
+{
+       struct gfar __iomem *regs = priv->gfargrp[0].regs;
+       struct phy_device *phydev = priv->phydev;
+
+       if (unlikely(test_bit(GFAR_RESETTING, &priv->state)))
+               return;
+
+       if (phydev->link) {
+               u32 tempval1 = gfar_read(&regs->maccfg1);
+               u32 tempval = gfar_read(&regs->maccfg2);
+               u32 ecntrl = gfar_read(&regs->ecntrl);
+
+               if (phydev->duplex != priv->oldduplex) {
+                       if (!(phydev->duplex))
+                               tempval &= ~(MACCFG2_FULL_DUPLEX);
+                       else
+                               tempval |= MACCFG2_FULL_DUPLEX;
+
+                       priv->oldduplex = phydev->duplex;
+               }
+
+               if (phydev->speed != priv->oldspeed) {
+                       switch (phydev->speed) {
+                       case 1000:
+                               tempval =
+                                   ((tempval & ~(MACCFG2_IF)) | MACCFG2_GMII);
+
+                               ecntrl &= ~(ECNTRL_R100);
+                               break;
+                       case 100:
+                       case 10:
+                               tempval =
+                                   ((tempval & ~(MACCFG2_IF)) | MACCFG2_MII);
+
+                               /* Reduced mode distinguishes
+                                * between 10 and 100
+                                */
+                               if (phydev->speed == SPEED_100)
+                                       ecntrl |= ECNTRL_R100;
+                               else
+                                       ecntrl &= ~(ECNTRL_R100);
+                               break;
+                       default:
+                               netif_warn(priv, link, priv->ndev,
+                                          "Ack!  Speed (%d) is not 10/100/1000!\n",
+                                          phydev->speed);
+                               break;
+                       }
+
+                       priv->oldspeed = phydev->speed;
+               }
+
+               tempval1 &= ~(MACCFG1_TX_FLOW | MACCFG1_RX_FLOW);
+               tempval1 |= gfar_get_flowctrl_cfg(priv);
+
+               gfar_write(&regs->maccfg1, tempval1);
+               gfar_write(&regs->maccfg2, tempval);
+               gfar_write(&regs->ecntrl, ecntrl);
+
+               if (!priv->oldlink)
+                       priv->oldlink = 1;
+
+       } else if (priv->oldlink) {
+               priv->oldlink = 0;
+               priv->oldspeed = 0;
+               priv->oldduplex = -1;
+       }
+
+       if (netif_msg_link(priv))
+               phy_print_status(phydev);
+}
+
 static struct of_device_id gfar_match[] =
 {
        {
index 891dbee6e6c14d2394cc2dff00092f448faf3dc2..76d70708f864af66b4a525e671f021e0d30a2b7d 100644 (file)
@@ -533,6 +533,9 @@ static int gfar_spauseparam(struct net_device *dev,
        struct gfar __iomem *regs = priv->gfargrp[0].regs;
        u32 oldadv, newadv;
 
+       if (!phydev)
+               return -ENODEV;
+
        if (!(phydev->supported & SUPPORTED_Pause) ||
            (!(phydev->supported & SUPPORTED_Asym_Pause) &&
             (epause->rx_pause != epause->tx_pause)))
index c8299c31b21f9f5c52dc380f9b867597c1b8bcdf..fab39e2954410106f9c26304f73c29169c1d35a1 100644 (file)
@@ -1728,9 +1728,6 @@ static int init_phy(struct net_device *dev)
 
        phydev = of_phy_connect(dev, ug_info->phy_node, &adjust_link, 0,
                                priv->phy_interface);
-       if (!phydev)
-               phydev = of_phy_connect_fixed_link(dev, &adjust_link,
-                                                  priv->phy_interface);
        if (!phydev) {
                dev_err(&dev->dev, "Could not attach to PHY\n");
                return -ENODEV;
@@ -3790,6 +3787,17 @@ static int ucc_geth_probe(struct platform_device* ofdev)
        ug_info->uf_info.irq = irq_of_parse_and_map(np, 0);
 
        ug_info->phy_node = of_parse_phandle(np, "phy-handle", 0);
+       if (!ug_info->phy_node) {
+               /* In the case of a fixed PHY, the DT node associated
+                * to the PHY is the Ethernet MAC DT node.
+                */
+               if (of_phy_is_fixed_link(np)) {
+                       err = of_phy_register_fixed_link(np);
+                       if (err)
+                               return err;
+               }
+               ug_info->phy_node = np;
+       }
 
        /* Find the TBI PHY node.  If it's not there, we don't support SGMII */
        ug_info->tbi_node = of_parse_phandle(np, "tbi-handle", 0);
index 413329eff2ffc05f80197d05d16a9c4332821b6b..cc83350d56ba1c05aa7eb619b607635f76de6400 100644 (file)
@@ -417,5 +417,5 @@ static const struct ethtool_ops uec_ethtool_ops = {
 
 void uec_set_ethtool_ops(struct net_device *netdev)
 {
-       SET_ETHTOOL_OPS(netdev, &uec_ethtool_ops);
+       netdev->ethtool_ops = &uec_ethtool_ops;
 }
index 7becab1aa3e43b41c8f4b942cce18d9c584152fd..cfe7a74317307f8ef1ef39acd2c78c97e903d423 100644 (file)
@@ -256,7 +256,7 @@ static int fmvj18x_probe(struct pcmcia_device *link)
     dev->netdev_ops = &fjn_netdev_ops;
     dev->watchdog_timeo = TX_TIMEOUT;
 
-    SET_ETHTOOL_OPS(dev, &netdev_ethtool_ops);
+    dev->ethtool_ops = &netdev_ethtool_ops;
 
     return fmvj18x_config(link);
 } /* fmvj18x_attach */
index 95837b99a464865a7ca47273cb49ce9b3200136b..6055e3eaf49c860eff05450d4135f11b9d52a0a4 100644 (file)
@@ -278,5 +278,5 @@ static const struct ethtool_ops ehea_ethtool_ops = {
 
 void ehea_set_ethtool_ops(struct net_device *netdev)
 {
-       SET_ETHTOOL_OPS(netdev, &ehea_ethtool_ops);
+       netdev->ethtool_ops = &ehea_ethtool_ops;
 }
index 9b03033bb5576f52d7c6f687f9e1a99c8e7a21a5..a0820f72b25c88bf9141354231813864c8159fec 100644 (file)
@@ -103,12 +103,14 @@ out_nomem:
 
 static void hw_queue_dtor(struct hw_queue *queue)
 {
-       int pages_per_kpage = PAGE_SIZE / queue->pagesize;
+       int pages_per_kpage;
        int i, nr_pages;
 
        if (!queue || !queue->queue_pages)
                return;
 
+       pages_per_kpage = PAGE_SIZE / queue->pagesize;
+
        nr_pages = queue->queue_length / queue->pagesize;
 
        for (i = 0; i < nr_pages; i += pages_per_kpage)
index ae342fdb42c8e79853507007ad2ea6c8d3681fad..87bd953cc2eeaef7f6af65902841645e98f7eda5 100644 (file)
@@ -2879,7 +2879,7 @@ static int emac_probe(struct platform_device *ofdev)
                dev->commac.ops = &emac_commac_sg_ops;
        } else
                ndev->netdev_ops = &emac_netdev_ops;
-       SET_ETHTOOL_OPS(ndev, &emac_ethtool_ops);
+       ndev->ethtool_ops = &emac_ethtool_ops;
 
        netif_carrier_off(ndev);
 
index 25045ae071711f03cee9ccabf9898cf2abebbc21..5727779a7df27477bee22c8bad9ac8253b083aea 100644 (file)
@@ -2245,7 +2245,7 @@ static int ipg_probe(struct pci_dev *pdev, const struct pci_device_id *id)
         */
        dev->netdev_ops = &ipg_netdev_ops;
        SET_NETDEV_DEV(dev, &pdev->dev);
-       SET_ETHTOOL_OPS(dev, &ipg_ethtool_ops);
+       dev->ethtool_ops = &ipg_ethtool_ops;
 
        rc = pci_request_regions(pdev, DRV_NAME);
        if (rc)
index b56461ce674c7832152cdab4c0b4c3b27789d9a4..9d979d7debef0fd1cb3ec80ca3dd8d908b6cd33b 100644 (file)
@@ -2854,7 +2854,7 @@ static int e100_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
        netdev->hw_features |= NETIF_F_RXALL;
 
        netdev->netdev_ops = &e100_netdev_ops;
-       SET_ETHTOOL_OPS(netdev, &e100_ethtool_ops);
+       netdev->ethtool_ops = &e100_ethtool_ops;
        netdev->watchdog_timeo = E100_WATCHDOG_PERIOD;
        strncpy(netdev->name, pci_name(pdev), sizeof(netdev->name) - 1);
 
index 73a8aeefb92a46d13a4c73be6cade2c5d694e00f..341889a4ef7f93f76fb4d845a592f015f7bd2d70 100644 (file)
@@ -1905,5 +1905,5 @@ static const struct ethtool_ops e1000_ethtool_ops = {
 
 void e1000_set_ethtool_ops(struct net_device *netdev)
 {
-       SET_ETHTOOL_OPS(netdev, &e1000_ethtool_ops);
+       netdev->ethtool_ops = &e1000_ethtool_ops;
 }
index 1471c5464a89e72d87aa571d4a1b15d791a3f015..e27e60910949c95a5217cad20bfc722e35527cc5 100644 (file)
@@ -265,10 +265,10 @@ struct e1000_adapter {
        u32 tx_hwtstamp_timeouts;
 
        /* Rx */
-       bool (*clean_rx) (struct e1000_ring *ring, int *work_done,
-                         int work_to_do) ____cacheline_aligned_in_smp;
-       void (*alloc_rx_buf) (struct e1000_ring *ring, int cleaned_count,
-                             gfp_t gfp);
+       bool (*clean_rx)(struct e1000_ring *ring, int *work_done,
+                        int work_to_do) ____cacheline_aligned_in_smp;
+       void (*alloc_rx_buf)(struct e1000_ring *ring, int cleaned_count,
+                            gfp_t gfp);
        struct e1000_ring *rx_ring;
 
        u32 rx_int_delay;
index cad250bc1b99fc81d51fb8956eee74c9acc3bc7e..e9a48bb5caacb8e73e5ebb8eeb8baafbbf2f8679 100644 (file)
@@ -169,6 +169,7 @@ static int e1000_get_settings(struct net_device *netdev,
                }
        } else if (!pm_runtime_suspended(netdev->dev.parent)) {
                u32 status = er32(STATUS);
+
                if (status & E1000_STATUS_LU) {
                        if (status & E1000_STATUS_SPEED_1000)
                                speed = SPEED_1000;
@@ -783,25 +784,26 @@ static bool reg_pattern_test(struct e1000_adapter *adapter, u64 *data,
                              reg + (offset << 2), val,
                              (test[pat] & write & mask));
                        *data = reg;
-                       return 1;
+                       return true;
                }
        }
-       return 0;
+       return false;
 }
 
 static bool reg_set_and_check(struct e1000_adapter *adapter, u64 *data,
                              int reg, u32 mask, u32 write)
 {
        u32 val;
+
        __ew32(&adapter->hw, reg, write & mask);
        val = __er32(&adapter->hw, reg);
        if ((write & mask) != (val & mask)) {
                e_err("set/check test failed (reg 0x%05X): got 0x%08X expected 0x%08X\n",
                      reg, (val & mask), (write & mask));
                *data = reg;
-               return 1;
+               return true;
        }
-       return 0;
+       return false;
 }
 
 #define REG_PATTERN_TEST_ARRAY(reg, offset, mask, write)                       \
@@ -1717,6 +1719,7 @@ static int e1000_link_test(struct e1000_adapter *adapter, u64 *data)
        *data = 0;
        if (hw->phy.media_type == e1000_media_type_internal_serdes) {
                int i = 0;
+
                hw->mac.serdes_has_link = false;
 
                /* On some blade server designs, link establishment
@@ -2315,5 +2318,5 @@ static const struct ethtool_ops e1000_ethtool_ops = {
 
 void e1000e_set_ethtool_ops(struct net_device *netdev)
 {
-       SET_ETHTOOL_OPS(netdev, &e1000_ethtool_ops);
+       netdev->ethtool_ops = &e1000_ethtool_ops;
 }
index 9866f264f55e33a8e564757730ada0d6ab7c6a92..5f55395616612d843b10a9b7f6ad0345f15344fb 100644 (file)
@@ -186,7 +186,7 @@ static bool e1000_phy_is_accessible_pchlan(struct e1000_hw *hw)
 {
        u16 phy_reg = 0;
        u32 phy_id = 0;
-       s32 ret_val;
+       s32 ret_val = 0;
        u16 retry_count;
        u32 mac_reg = 0;
 
@@ -217,11 +217,13 @@ static bool e1000_phy_is_accessible_pchlan(struct e1000_hw *hw)
        /* In case the PHY needs to be in mdio slow mode,
         * set slow mode and try to get the PHY id again.
         */
-       hw->phy.ops.release(hw);
-       ret_val = e1000_set_mdio_slow_mode_hv(hw);
-       if (!ret_val)
-               ret_val = e1000e_get_phy_id(hw);
-       hw->phy.ops.acquire(hw);
+       if (hw->mac.type < e1000_pch_lpt) {
+               hw->phy.ops.release(hw);
+               ret_val = e1000_set_mdio_slow_mode_hv(hw);
+               if (!ret_val)
+                       ret_val = e1000e_get_phy_id(hw);
+               hw->phy.ops.acquire(hw);
+       }
 
        if (ret_val)
                return false;
@@ -842,6 +844,17 @@ s32 e1000_set_eee_pchlan(struct e1000_hw *hw)
                }
        }
 
+       if (hw->phy.type == e1000_phy_82579) {
+               ret_val = e1000_read_emi_reg_locked(hw, I82579_LPI_PLL_SHUT,
+                                                   &data);
+               if (ret_val)
+                       goto release;
+
+               data &= ~I82579_LPI_100_PLL_SHUT;
+               ret_val = e1000_write_emi_reg_locked(hw, I82579_LPI_PLL_SHUT,
+                                                    data);
+       }
+
        /* R/Clr IEEE MMD 3.1 bits 11:10 - Tx/Rx LPI Received */
        ret_val = e1000_read_emi_reg_locked(hw, pcs_status, &data);
        if (ret_val)
@@ -1314,14 +1327,18 @@ static s32 e1000_check_for_copper_link_ich8lan(struct e1000_hw *hw)
                        return ret_val;
        }
 
-       /* When connected at 10Mbps half-duplex, 82579 parts are excessively
+       /* When connected at 10Mbps half-duplex, some parts are excessively
         * aggressive resulting in many collisions. To avoid this, increase
         * the IPG and reduce Rx latency in the PHY.
         */
-       if ((hw->mac.type == e1000_pch2lan) && link) {
+       if (((hw->mac.type == e1000_pch2lan) ||
+            (hw->mac.type == e1000_pch_lpt)) && link) {
                u32 reg;
+
                reg = er32(STATUS);
                if (!(reg & (E1000_STATUS_FD | E1000_STATUS_SPEED_MASK))) {
+                       u16 emi_addr;
+
                        reg = er32(TIPG);
                        reg &= ~E1000_TIPG_IPGT_MASK;
                        reg |= 0xFF;
@@ -1332,8 +1349,12 @@ static s32 e1000_check_for_copper_link_ich8lan(struct e1000_hw *hw)
                        if (ret_val)
                                return ret_val;
 
-                       ret_val =
-                           e1000_write_emi_reg_locked(hw, I82579_RX_CONFIG, 0);
+                       if (hw->mac.type == e1000_pch2lan)
+                               emi_addr = I82579_RX_CONFIG;
+                       else
+                               emi_addr = I217_RX_CONFIG;
+
+                       ret_val = e1000_write_emi_reg_locked(hw, emi_addr, 0);
 
                        hw->phy.ops.release(hw);
 
@@ -2493,51 +2514,44 @@ release:
  *  e1000_k1_gig_workaround_lv - K1 Si workaround
  *  @hw:   pointer to the HW structure
  *
- *  Workaround to set the K1 beacon duration for 82579 parts
+ *  Workaround to set the K1 beacon duration for 82579 parts in 10Mbps
+ *  Disable K1 in 1000Mbps and 100Mbps
  **/
 static s32 e1000_k1_workaround_lv(struct e1000_hw *hw)
 {
        s32 ret_val = 0;
        u16 status_reg = 0;
-       u32 mac_reg;
-       u16 phy_reg;
 
        if (hw->mac.type != e1000_pch2lan)
                return 0;
 
-       /* Set K1 beacon duration based on 1Gbps speed or otherwise */
+       /* Set K1 beacon duration based on 10Mbs speed */
        ret_val = e1e_rphy(hw, HV_M_STATUS, &status_reg);
        if (ret_val)
                return ret_val;
 
        if ((status_reg & (HV_M_STATUS_LINK_UP | HV_M_STATUS_AUTONEG_COMPLETE))
            == (HV_M_STATUS_LINK_UP | HV_M_STATUS_AUTONEG_COMPLETE)) {
-               mac_reg = er32(FEXTNVM4);
-               mac_reg &= ~E1000_FEXTNVM4_BEACON_DURATION_MASK;
-
-               ret_val = e1e_rphy(hw, I82579_LPI_CTRL, &phy_reg);
-               if (ret_val)
-                       return ret_val;
-
-               if (status_reg & HV_M_STATUS_SPEED_1000) {
+               if (status_reg &
+                   (HV_M_STATUS_SPEED_1000 | HV_M_STATUS_SPEED_100)) {
                        u16 pm_phy_reg;
 
-                       mac_reg |= E1000_FEXTNVM4_BEACON_DURATION_8USEC;
-                       phy_reg &= ~I82579_LPI_CTRL_FORCE_PLL_LOCK_COUNT;
-                       /* LV 1G Packet drop issue wa  */
+                       /* LV 1G/100 Packet drop issue wa  */
                        ret_val = e1e_rphy(hw, HV_PM_CTRL, &pm_phy_reg);
                        if (ret_val)
                                return ret_val;
-                       pm_phy_reg &= ~HV_PM_CTRL_PLL_STOP_IN_K1_GIGA;
+                       pm_phy_reg &= ~HV_PM_CTRL_K1_ENABLE;
                        ret_val = e1e_wphy(hw, HV_PM_CTRL, pm_phy_reg);
                        if (ret_val)
                                return ret_val;
                } else {
+                       u32 mac_reg;
+
+                       mac_reg = er32(FEXTNVM4);
+                       mac_reg &= ~E1000_FEXTNVM4_BEACON_DURATION_MASK;
                        mac_reg |= E1000_FEXTNVM4_BEACON_DURATION_16USEC;
-                       phy_reg |= I82579_LPI_CTRL_FORCE_PLL_LOCK_COUNT;
+                       ew32(FEXTNVM4, mac_reg);
                }
-               ew32(FEXTNVM4, mac_reg);
-               ret_val = e1e_wphy(hw, I82579_LPI_CTRL, phy_reg);
        }
 
        return ret_val;
index bead50f9187b527291596da67351339b64482707..5515126c81c199b5e44bd909ac9ea3f336cf4fcd 100644 (file)
 #define I82577_MSE_THRESHOLD   0x0887  /* 82577 Mean Square Error Threshold */
 #define I82579_MSE_LINK_DOWN   0x2411  /* MSE count before dropping link */
 #define I82579_RX_CONFIG               0x3412  /* Receive configuration */
+#define I82579_LPI_PLL_SHUT            0x4412  /* LPI PLL Shut Enable */
 #define I82579_EEE_PCS_STATUS          0x182E  /* IEEE MMD Register 3.1 >> 8 */
 #define I82579_EEE_CAPABILITY          0x0410  /* IEEE MMD Register 3.20 */
 #define I82579_EEE_ADVERTISEMENT       0x040E  /* IEEE MMD Register 7.60 */
 #define I82579_EEE_LP_ABILITY          0x040F  /* IEEE MMD Register 7.61 */
 #define I82579_EEE_100_SUPPORTED       (1 << 1)        /* 100BaseTx EEE */
 #define I82579_EEE_1000_SUPPORTED      (1 << 2)        /* 1000BaseTx EEE */
+#define I82579_LPI_100_PLL_SHUT        (1 << 2)        /* 100M LPI PLL Shut Enabled */
 #define I217_EEE_PCS_STATUS    0x9401  /* IEEE MMD Register 3.1 */
 #define I217_EEE_CAPABILITY    0x8000  /* IEEE MMD Register 3.20 */
 #define I217_EEE_ADVERTISEMENT 0x8001  /* IEEE MMD Register 7.60 */
 #define I217_EEE_LP_ABILITY    0x8002  /* IEEE MMD Register 7.61 */
+#define I217_RX_CONFIG         0xB20C  /* Receive configuration */
 
 #define E1000_EEE_RX_LPI_RCVD  0x0400  /* Tx LP idle received */
 #define E1000_EEE_TX_LPI_RCVD  0x0800  /* Rx LP idle received */
index d50c91e5052808b9485a44538e4b0821b9b30e88..e4207efd13f8651cf75b5ea9ba3a94a437f189a4 100644 (file)
@@ -599,6 +599,7 @@ static void e1000e_update_rdt_wa(struct e1000_ring *rx_ring, unsigned int i)
 
        if (unlikely(!ret_val && (i != readl(rx_ring->tail)))) {
                u32 rctl = er32(RCTL);
+
                ew32(RCTL, rctl & ~E1000_RCTL_EN);
                e_err("ME firmware caused invalid RDT - resetting\n");
                schedule_work(&adapter->reset_task);
@@ -615,6 +616,7 @@ static void e1000e_update_tdt_wa(struct e1000_ring *tx_ring, unsigned int i)
 
        if (unlikely(!ret_val && (i != readl(tx_ring->tail)))) {
                u32 tctl = er32(TCTL);
+
                ew32(TCTL, tctl & ~E1000_TCTL_EN);
                e_err("ME firmware caused invalid TDT - resetting\n");
                schedule_work(&adapter->reset_task);
@@ -1165,7 +1167,7 @@ static void e1000e_tx_hwtstamp_work(struct work_struct *work)
                dev_kfree_skb_any(adapter->tx_hwtstamp_skb);
                adapter->tx_hwtstamp_skb = NULL;
                adapter->tx_hwtstamp_timeouts++;
-               e_warn("clearing Tx timestamp hang");
+               e_warn("clearing Tx timestamp hang\n");
        } else {
                /* reschedule to check later */
                schedule_work(&adapter->tx_hwtstamp_work);
@@ -1198,6 +1200,7 @@ static bool e1000_clean_tx_irq(struct e1000_ring *tx_ring)
        while ((eop_desc->upper.data & cpu_to_le32(E1000_TXD_STAT_DD)) &&
               (count < tx_ring->count)) {
                bool cleaned = false;
+
                rmb();          /* read buffer_info after eop_desc */
                for (; !cleaned; count++) {
                        tx_desc = E1000_TX_DESC(*tx_ring, i);
@@ -1753,6 +1756,7 @@ static irqreturn_t e1000_intr_msi(int __always_unused irq, void *data)
                    adapter->flags & FLAG_RX_NEEDS_RESTART) {
                        /* disable receives */
                        u32 rctl = er32(RCTL);
+
                        ew32(RCTL, rctl & ~E1000_RCTL_EN);
                        adapter->flags |= FLAG_RESTART_NOW;
                }
@@ -1960,6 +1964,7 @@ static void e1000_configure_msix(struct e1000_adapter *adapter)
        /* Workaround issue with spurious interrupts on 82574 in MSI-X mode */
        if (hw->mac.type == e1000_82574) {
                u32 rfctl = er32(RFCTL);
+
                rfctl |= E1000_RFCTL_ACK_DIS;
                ew32(RFCTL, rfctl);
        }
@@ -2204,6 +2209,7 @@ static void e1000_irq_disable(struct e1000_adapter *adapter)
 
        if (adapter->msix_entries) {
                int i;
+
                for (i = 0; i < adapter->num_vectors; i++)
                        synchronize_irq(adapter->msix_entries[i].vector);
        } else {
@@ -2921,6 +2927,7 @@ static void e1000_configure_tx(struct e1000_adapter *adapter)
 
        if (adapter->flags2 & FLAG2_DMA_BURST) {
                u32 txdctl = er32(TXDCTL(0));
+
                txdctl &= ~(E1000_TXDCTL_PTHRESH | E1000_TXDCTL_HTHRESH |
                            E1000_TXDCTL_WTHRESH);
                /* set up some performance related parameters to encourage the
@@ -3239,6 +3246,7 @@ static void e1000_configure_rx(struct e1000_adapter *adapter)
 
                if (adapter->flags & FLAG_IS_ICH) {
                        u32 rxdctl = er32(RXDCTL(0));
+
                        ew32(RXDCTL(0), rxdctl | 0x3);
                }
 
@@ -4695,6 +4703,7 @@ static void e1000e_update_stats(struct e1000_adapter *adapter)
        /* Correctable ECC Errors */
        if (hw->mac.type == e1000_pch_lpt) {
                u32 pbeccsts = er32(PBECCSTS);
+
                adapter->corr_errors +=
                    pbeccsts & E1000_PBECCSTS_CORR_ERR_CNT_MASK;
                adapter->uncorr_errors +=
@@ -4808,6 +4817,7 @@ static void e1000e_enable_receives(struct e1000_adapter *adapter)
            (adapter->flags & FLAG_RESTART_NOW)) {
                struct e1000_hw *hw = &adapter->hw;
                u32 rctl = er32(RCTL);
+
                ew32(RCTL, rctl | E1000_RCTL_EN);
                adapter->flags &= ~FLAG_RESTART_NOW;
        }
@@ -4930,6 +4940,7 @@ static void e1000_watchdog_task(struct work_struct *work)
                        if ((adapter->flags & FLAG_TARC_SPEED_MODE_BIT) &&
                            !txb2b) {
                                u32 tarc0;
+
                                tarc0 = er32(TARC(0));
                                tarc0 &= ~SPEED_MODE_BIT;
                                ew32(TARC(0), tarc0);
@@ -5170,7 +5181,7 @@ static bool e1000_tx_csum(struct e1000_ring *tx_ring, struct sk_buff *skb)
        __be16 protocol;
 
        if (skb->ip_summed != CHECKSUM_PARTIAL)
-               return 0;
+               return false;
 
        if (skb->protocol == cpu_to_be16(ETH_P_8021Q))
                protocol = vlan_eth_hdr(skb)->h_vlan_encapsulated_proto;
@@ -5215,7 +5226,7 @@ static bool e1000_tx_csum(struct e1000_ring *tx_ring, struct sk_buff *skb)
                i = 0;
        tx_ring->next_to_use = i;
 
-       return 1;
+       return true;
 }
 
 static int e1000_tx_map(struct e1000_ring *tx_ring, struct sk_buff *skb,
@@ -5687,7 +5698,7 @@ struct rtnl_link_stats64 *e1000e_get_stats64(struct net_device *netdev,
 static int e1000_change_mtu(struct net_device *netdev, int new_mtu)
 {
        struct e1000_adapter *adapter = netdev_priv(netdev);
-       int max_frame = new_mtu + ETH_HLEN + ETH_FCS_LEN;
+       int max_frame = new_mtu + VLAN_HLEN + ETH_HLEN + ETH_FCS_LEN;
 
        /* Jumbo frame support */
        if ((max_frame > ETH_FRAME_LEN + ETH_FCS_LEN) &&
@@ -6209,6 +6220,7 @@ static int __e1000_resume(struct pci_dev *pdev)
                e1e_wphy(&adapter->hw, BM_WUS, ~0);
        } else {
                u32 wus = er32(WUS);
+
                if (wus) {
                        e_info("MAC Wakeup cause - %s\n",
                               wus & E1000_WUS_EX ? "Unicast Packet" :
@@ -6235,6 +6247,7 @@ static int __e1000_resume(struct pci_dev *pdev)
        return 0;
 }
 
+#ifdef CONFIG_PM_SLEEP
 static int e1000e_pm_thaw(struct device *dev)
 {
        struct net_device *netdev = pci_get_drvdata(to_pci_dev(dev));
@@ -6255,7 +6268,6 @@ static int e1000e_pm_thaw(struct device *dev)
        return 0;
 }
 
-#ifdef CONFIG_PM_SLEEP
 static int e1000e_pm_suspend(struct device *dev)
 {
        struct pci_dev *pdev = to_pci_dev(dev);
@@ -7027,7 +7039,7 @@ static const struct pci_error_handlers e1000_err_handler = {
        .resume = e1000_io_resume,
 };
 
-static DEFINE_PCI_DEVICE_TABLE(e1000_pci_tbl) = {
+static const struct pci_device_id e1000_pci_tbl[] = {
        { PCI_VDEVICE(INTEL, E1000_DEV_ID_82571EB_COPPER), board_82571 },
        { PCI_VDEVICE(INTEL, E1000_DEV_ID_82571EB_FIBER), board_82571 },
        { PCI_VDEVICE(INTEL, E1000_DEV_ID_82571EB_QUAD_COPPER), board_82571 },
@@ -7144,6 +7156,7 @@ static struct pci_driver e1000_driver = {
 static int __init e1000_init_module(void)
 {
        int ret;
+
        pr_info("Intel(R) PRO/1000 Network Driver - %s\n",
                e1000e_driver_version);
        pr_info("Copyright(c) 1999 - 2014 Intel Corporation.\n");
index a9a976f04bffe957e22b0852bd8918c6d6f63bd0..b1f212b7baf7e71f0b2a3a160463898019778a00 100644 (file)
@@ -398,6 +398,7 @@ s32 e1000e_write_nvm_spi(struct e1000_hw *hw, u16 offset, u16 words, u16 *data)
                /* Loop to allow for up to whole page write of eeprom */
                while (widx < words) {
                        u16 word_out = data[widx];
+
                        word_out = (word_out >> 8) | (word_out << 8);
                        e1000_shift_out_eec_bits(hw, word_out, 16);
                        widx++;
index d0ac0f3249c886415d308c4a0cd376feda3d44db..aa1923f7ebdd2e56dd0ebd436feb69164426ad7c 100644 (file)
@@ -436,6 +436,7 @@ void e1000e_check_options(struct e1000_adapter *adapter)
 
                if (num_IntMode > bd) {
                        unsigned int int_mode = IntMode[bd];
+
                        e1000_validate_option(&int_mode, &opt, adapter);
                        adapter->int_mode = int_mode;
                } else {
@@ -457,6 +458,7 @@ void e1000e_check_options(struct e1000_adapter *adapter)
 
                if (num_SmartPowerDownEnable > bd) {
                        unsigned int spd = SmartPowerDownEnable[bd];
+
                        e1000_validate_option(&spd, &opt, adapter);
                        if ((adapter->flags & FLAG_HAS_SMART_POWER_DOWN) && spd)
                                adapter->flags |= FLAG_SMART_POWER_DOWN;
@@ -473,6 +475,7 @@ void e1000e_check_options(struct e1000_adapter *adapter)
 
                if (num_CrcStripping > bd) {
                        unsigned int crc_stripping = CrcStripping[bd];
+
                        e1000_validate_option(&crc_stripping, &opt, adapter);
                        if (crc_stripping == OPTION_ENABLED) {
                                adapter->flags2 |= FLAG2_CRC_STRIPPING;
@@ -495,6 +498,7 @@ void e1000e_check_options(struct e1000_adapter *adapter)
 
                if (num_KumeranLockLoss > bd) {
                        unsigned int kmrn_lock_loss = KumeranLockLoss[bd];
+
                        e1000_validate_option(&kmrn_lock_loss, &opt, adapter);
                        enabled = kmrn_lock_loss;
                }
index 00b3fc98bf309bf3d371a984f0da7245caf07013..b2005e13fb01583a10f58aa37339feb2336191bd 100644 (file)
@@ -2896,6 +2896,7 @@ static s32 __e1000_write_phy_reg_hv(struct e1000_hw *hw, u32 offset, u16 data,
                    (hw->phy.addr == 2) &&
                    !(MAX_PHY_REG_ADDRESS & reg) && (data & (1 << 11))) {
                        u16 data2 = 0x7EFF;
+
                        ret_val = e1000_access_phy_debug_regs_hv(hw,
                                                                 (1 << 6) | 0x3,
                                                                 &data2, false);
index 3841bccf058c7aa0fe3b2f90e2c70c38e9b6f209..537d2780b408b3cdc9e0fc546d45c04444cc86e2 100644 (file)
@@ -164,6 +164,7 @@ s32 e1000_get_cable_length_82577(struct e1000_hw *hw);
 #define HV_M_STATUS_AUTONEG_COMPLETE   0x1000
 #define HV_M_STATUS_SPEED_MASK         0x0300
 #define HV_M_STATUS_SPEED_1000         0x0200
+#define HV_M_STATUS_SPEED_100          0x0100
 #define HV_M_STATUS_LINK_UP            0x0040
 
 #define IGP01E1000_PHY_PCS_INIT_REG    0x00B4
index beb7b4393a6c26fc46c917a798a6fd7bfaee28ea..ef5bb11557e54b2d47e075bfff0a1cb47c786823 100644 (file)
 #define STRINGIFY(foo)  #foo
 #define XSTRINGIFY(bar) STRINGIFY(bar)
 
-#ifndef ARCH_HAS_PREFETCH
-#define prefetch(X)
-#endif
-
 #define I40E_RX_DESC(R, i)                     \
        ((ring_is_16byte_desc_enabled(R))       \
                ? (union i40e_32byte_rx_desc *) \
@@ -329,9 +325,7 @@ struct i40e_pf {
        struct ptp_clock *ptp_clock;
        struct ptp_clock_info ptp_caps;
        struct sk_buff *ptp_tx_skb;
-       struct work_struct ptp_tx_work;
        struct hwtstamp_config tstamp_config;
-       unsigned long ptp_tx_start;
        unsigned long last_rx_ptp_check;
        spinlock_t tmreg_lock; /* Used to protect the device time registers. */
        u64 ptp_base_adj;
index ed3902bf249b3e4ceb047ed14762d6ea50b1a4c0..34415d342ece381a97755b051f68945b254303b8 100644 (file)
 
 static void i40e_resume_aq(struct i40e_hw *hw);
 
+/**
+ * i40e_is_nvm_update_op - return true if this is an NVM update operation
+ * @desc: API request descriptor
+ **/
+static inline bool i40e_is_nvm_update_op(struct i40e_aq_desc *desc)
+{
+       return (desc->opcode == i40e_aqc_opc_nvm_erase) ||
+              (desc->opcode == i40e_aqc_opc_nvm_update);
+}
+
 /**
  *  i40e_adminq_init_regs - Initialize AdminQ registers
  *  @hw: pointer to the hardware structure
@@ -585,6 +595,7 @@ i40e_status i40e_init_adminq(struct i40e_hw *hw)
 
        /* pre-emptive resource lock release */
        i40e_aq_release_resource(hw, I40E_NVM_RESOURCE_ID, 0, NULL);
+       hw->aq.nvm_busy = false;
 
        ret_code = i40e_aq_set_hmc_resource_profile(hw,
                                                    I40E_HMC_PROFILE_DEFAULT,
@@ -708,6 +719,12 @@ i40e_status i40e_asq_send_command(struct i40e_hw *hw,
                goto asq_send_command_exit;
        }
 
+       if (i40e_is_nvm_update_op(desc) && hw->aq.nvm_busy) {
+               i40e_debug(hw, I40E_DEBUG_AQ_MESSAGE, "AQTX: NVM busy.\n");
+               status = I40E_ERR_NVM;
+               goto asq_send_command_exit;
+       }
+
        details = I40E_ADMINQ_DETAILS(hw->aq.asq, hw->aq.asq.next_to_use);
        if (cmd_details) {
                *details = *cmd_details;
@@ -835,6 +852,9 @@ i40e_status i40e_asq_send_command(struct i40e_hw *hw,
                hw->aq.asq_last_status = (enum i40e_admin_queue_err)retval;
        }
 
+       if (i40e_is_nvm_update_op(desc))
+               hw->aq.nvm_busy = true;
+
        /* update the error if time out occurred */
        if ((!cmd_completed) &&
            (!details->async && !details->postpone)) {
@@ -929,6 +949,9 @@ i40e_status i40e_clean_arq_element(struct i40e_hw *hw,
                               e->msg_size);
        }
 
+       if (i40e_is_nvm_update_op(&e->desc))
+               hw->aq.nvm_busy = false;
+
        /* Restore the original datalen and buffer address in the desc,
         * FW updates datalen to indicate the event message
         * size
index 993f7685a9111694726c1a9bee44f3653651df72..b1552fbc48a0e6876acf838ab7eadf1e72fe492e 100644 (file)
@@ -90,6 +90,7 @@ struct i40e_adminq_info {
        u16 fw_min_ver;                 /* firmware minor version */
        u16 api_maj_ver;                /* api major version */
        u16 api_min_ver;                /* api minor version */
+       bool nvm_busy;
 
        struct mutex asq_mutex; /* Send queue lock */
        struct mutex arq_mutex; /* Receive queue lock */
index 7b6374a8f8da5cbb5054c431dce910d75de205c5..f2ba4b76ecd31a8b431475ce101353be693c354a 100644 (file)
@@ -182,9 +182,6 @@ enum i40e_admin_queue_opc {
        i40e_aqc_opc_add_mirror_rule    = 0x0260,
        i40e_aqc_opc_delete_mirror_rule = 0x0261,
 
-       i40e_aqc_opc_set_storm_control_config = 0x0280,
-       i40e_aqc_opc_get_storm_control_config = 0x0281,
-
        /* DCB commands */
        i40e_aqc_opc_dcb_ignore_pfc = 0x0301,
        i40e_aqc_opc_dcb_updated    = 0x0302,
@@ -207,6 +204,7 @@ enum i40e_admin_queue_opc {
        i40e_aqc_opc_query_switching_comp_bw_config        = 0x041A,
        i40e_aqc_opc_suspend_port_tx                       = 0x041B,
        i40e_aqc_opc_resume_port_tx                        = 0x041C,
+       i40e_aqc_opc_configure_partition_bw                = 0x041D,
 
        /* hmc */
        i40e_aqc_opc_query_hmc_resource_profile = 0x0500,
@@ -1289,27 +1287,6 @@ struct i40e_aqc_add_delete_mirror_rule_completion {
 
 I40E_CHECK_CMD_LENGTH(i40e_aqc_add_delete_mirror_rule_completion);
 
-/* Set Storm Control Configuration (direct 0x0280)
- * Get Storm Control Configuration (direct 0x0281)
- *    the command and response use the same descriptor structure
- */
-struct i40e_aqc_set_get_storm_control_config {
-       __le32 broadcast_threshold;
-       __le32 multicast_threshold;
-       __le32 control_flags;
-#define I40E_AQC_STORM_CONTROL_MDIPW            0x01
-#define I40E_AQC_STORM_CONTROL_MDICW            0x02
-#define I40E_AQC_STORM_CONTROL_BDIPW            0x04
-#define I40E_AQC_STORM_CONTROL_BDICW            0x08
-#define I40E_AQC_STORM_CONTROL_BIDU             0x10
-#define I40E_AQC_STORM_CONTROL_INTERVAL_SHIFT   8
-#define I40E_AQC_STORM_CONTROL_INTERVAL_MASK    (0x3FF << \
-                                       I40E_AQC_STORM_CONTROL_INTERVAL_SHIFT)
-       u8     reserved[4];
-};
-
-I40E_CHECK_CMD_LENGTH(i40e_aqc_set_get_storm_control_config);
-
 /* DCB 0x03xx*/
 
 /* PFC Ignore (direct 0x0301)
@@ -1499,6 +1476,15 @@ struct i40e_aqc_query_switching_comp_bw_config_resp {
  * (direct 0x041B and 0x041C) uses the generic SEID struct
  */
 
+/* Configure partition BW
+ * (indirect 0x041D)
+ */
+struct i40e_aqc_configure_partition_bw_data {
+       __le16 pf_valid_bits;
+       u8     min_bw[16];      /* guaranteed bandwidth */
+       u8     max_bw[16];      /* bandwidth limit */
+};
+
 /* Get and set the active HMC resource profile and status.
  * (direct 0x0500) and (direct 0x0501)
  */
@@ -1583,11 +1569,8 @@ struct i40e_aq_get_phy_abilities_resp {
 #define I40E_AQ_PHY_FLAG_PAUSE_TX         0x01
 #define I40E_AQ_PHY_FLAG_PAUSE_RX         0x02
 #define I40E_AQ_PHY_FLAG_LOW_POWER        0x04
-#define I40E_AQ_PHY_FLAG_AN_SHIFT         3
-#define I40E_AQ_PHY_FLAG_AN_MASK          (0x3 << I40E_AQ_PHY_FLAG_AN_SHIFT)
-#define I40E_AQ_PHY_FLAG_AN_OFF           0x00 /* link forced on */
-#define I40E_AQ_PHY_FLAG_AN_OFF_LINK_DOWN 0x01
-#define I40E_AQ_PHY_FLAG_AN_ON            0x02
+#define I40E_AQ_PHY_LINK_ENABLED                 0x08
+#define I40E_AQ_PHY_AN_ENABLED                   0x10
 #define I40E_AQ_PHY_FLAG_MODULE_QUAL      0x20
        __le16 eee_capability;
 #define I40E_AQ_EEE_100BASE_TX       0x0002
index 922cdcc45c54b1d36de2a7c0b7faf0ed8cdf1e0d..22eefda3a5303d6a352fa75ce8a7b4d60707fe13 100644 (file)
@@ -975,6 +975,13 @@ i40e_status i40e_aq_get_link_info(struct i40e_hw *hw,
        hw_link_info->an_info = resp->an_info;
        hw_link_info->ext_info = resp->ext_info;
        hw_link_info->loopback = resp->loopback;
+       hw_link_info->max_frame_size = le16_to_cpu(resp->max_frame_size);
+       hw_link_info->pacing = resp->config & I40E_AQ_CONFIG_PACING_MASK;
+
+       if (resp->config & I40E_AQ_CONFIG_CRC_ENA)
+               hw_link_info->crc_enable = true;
+       else
+               hw_link_info->crc_enable = false;
 
        if (resp->command_flags & cpu_to_le16(I40E_AQ_LSE_ENABLE))
                hw_link_info->lse_enable = true;
@@ -1300,6 +1307,7 @@ i40e_status i40e_aq_send_driver_version(struct i40e_hw *hw,
        struct i40e_aqc_driver_version *cmd =
                (struct i40e_aqc_driver_version *)&desc.params.raw;
        i40e_status status;
+       u16 len;
 
        if (dv == NULL)
                return I40E_ERR_PARAM;
@@ -1311,7 +1319,14 @@ i40e_status i40e_aq_send_driver_version(struct i40e_hw *hw,
        cmd->driver_minor_ver = dv->minor_version;
        cmd->driver_build_ver = dv->build_version;
        cmd->driver_subbuild_ver = dv->subbuild_version;
-       status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details);
+
+       len = 0;
+       while (len < sizeof(dv->driver_string) &&
+              (dv->driver_string[len] < 0x80) &&
+              dv->driver_string[len])
+               len++;
+       status = i40e_asq_send_command(hw, &desc, dv->driver_string,
+                                      len, cmd_details);
 
        return status;
 }
@@ -2094,8 +2109,8 @@ i40e_status i40e_aq_start_lldp(struct i40e_hw *hw,
  * @cmd_details: pointer to command details structure or NULL
  **/
 i40e_status i40e_aq_add_udp_tunnel(struct i40e_hw *hw,
-                               u16 udp_port, u8 header_len,
-                               u8 protocol_index, u8 *filter_index,
+                               u16 udp_port, u8 protocol_index,
+                               u8 *filter_index,
                                struct i40e_asq_cmd_details *cmd_details)
 {
        struct i40e_aq_desc desc;
@@ -2252,6 +2267,35 @@ static i40e_status i40e_aq_tx_sched_cmd(struct i40e_hw *hw, u16 seid,
        return status;
 }
 
+/**
+ * i40e_aq_config_vsi_bw_limit - Configure VSI BW Limit
+ * @hw: pointer to the hw struct
+ * @seid: VSI seid
+ * @credit: BW limit credits (0 = disabled)
+ * @max_credit: Max BW limit credits
+ * @cmd_details: pointer to command details structure or NULL
+ **/
+i40e_status i40e_aq_config_vsi_bw_limit(struct i40e_hw *hw,
+                               u16 seid, u16 credit, u8 max_credit,
+                               struct i40e_asq_cmd_details *cmd_details)
+{
+       struct i40e_aq_desc desc;
+       struct i40e_aqc_configure_vsi_bw_limit *cmd =
+               (struct i40e_aqc_configure_vsi_bw_limit *)&desc.params.raw;
+       i40e_status status;
+
+       i40e_fill_default_direct_cmd_desc(&desc,
+                                         i40e_aqc_opc_configure_vsi_bw_limit);
+
+       cmd->vsi_seid = cpu_to_le16(seid);
+       cmd->credit = cpu_to_le16(credit);
+       cmd->max_credit = max_credit;
+
+       status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details);
+
+       return status;
+}
+
 /**
  * i40e_aq_config_vsi_tc_bw - Config VSI BW Allocation per TC
  * @hw: pointer to the hw struct
index 3c37386fd138fdeb3941170e5789687d90385fd5..1aaec400b28ecb3120b7236a516b13fb59f160b9 100644 (file)
@@ -1744,10 +1744,6 @@ static ssize_t i40e_dbg_command_write(struct file *filp,
                i40e_dbg_cmd_fd_ctrl(pf, I40E_FLAG_FD_ATR_ENABLED, false);
        } else if (strncmp(cmd_buf, "fd-atr on", 9) == 0) {
                i40e_dbg_cmd_fd_ctrl(pf, I40E_FLAG_FD_ATR_ENABLED, true);
-       } else if (strncmp(cmd_buf, "fd-sb off", 9) == 0) {
-               i40e_dbg_cmd_fd_ctrl(pf, I40E_FLAG_FD_SB_ENABLED, false);
-       } else if (strncmp(cmd_buf, "fd-sb on", 8) == 0) {
-               i40e_dbg_cmd_fd_ctrl(pf, I40E_FLAG_FD_SB_ENABLED, true);
        } else if (strncmp(cmd_buf, "lldp", 4) == 0) {
                if (strncmp(&cmd_buf[5], "stop", 4) == 0) {
                        int ret;
@@ -1967,8 +1963,6 @@ static ssize_t i40e_dbg_command_write(struct file *filp,
                dev_info(&pf->pdev->dev, "  rem fd_filter <dest q_index> <flex_off> <pctype> <dest_vsi> <dest_ctl> <fd_status> <cnt_index> <fd_id> <packet_len> <packet>\n");
                dev_info(&pf->pdev->dev, "  fd-atr off\n");
                dev_info(&pf->pdev->dev, "  fd-atr on\n");
-               dev_info(&pf->pdev->dev, "  fd-sb off\n");
-               dev_info(&pf->pdev->dev, "  fd-sb on\n");
                dev_info(&pf->pdev->dev, "  lldp start\n");
                dev_info(&pf->pdev->dev, "  lldp stop\n");
                dev_info(&pf->pdev->dev, "  lldp get local\n");
index 03d99cbc5c251bcbb0120667ff1d53e304da00a9..861e1db47a71dcfa78ee6e04ec4fbf20b7e55b59 100644 (file)
@@ -112,7 +112,6 @@ static struct i40e_stats i40e_gstrings_stats[] = {
        I40E_PF_STAT("rx_oversize", stats.rx_oversize),
        I40E_PF_STAT("rx_jabber", stats.rx_jabber),
        I40E_PF_STAT("VF_admin_queue_requests", vf_aq_requests),
-       I40E_PF_STAT("tx_hwtstamp_timeouts", tx_hwtstamp_timeouts),
        I40E_PF_STAT("rx_hwtstamp_cleared", rx_hwtstamp_cleared),
        /* LPI stats */
        I40E_PF_STAT("tx_lpi_status", stats.tx_lpi_status),
@@ -122,8 +121,9 @@ static struct i40e_stats i40e_gstrings_stats[] = {
 };
 
 #define I40E_QUEUE_STATS_LEN(n) \
-  ((((struct i40e_netdev_priv *)netdev_priv((n)))->vsi->num_queue_pairs + \
-    ((struct i40e_netdev_priv *)netdev_priv((n)))->vsi->num_queue_pairs) * 2)
+       (((struct i40e_netdev_priv *)netdev_priv((n)))->vsi->num_queue_pairs \
+           * 2 /* Tx and Rx together */                                     \
+           * (sizeof(struct i40e_queue_stats) / sizeof(u64)))
 #define I40E_GLOBAL_STATS_LEN  ARRAY_SIZE(i40e_gstrings_stats)
 #define I40E_NETDEV_STATS_LEN   ARRAY_SIZE(i40e_gstrings_net_stats)
 #define I40E_VSI_STATS_LEN(n)   (I40E_NETDEV_STATS_LEN + \
@@ -649,7 +649,7 @@ static void i40e_get_ethtool_stats(struct net_device *netdev,
                        sizeof(u64)) ? *(u64 *)p : *(u32 *)p;
        }
        rcu_read_lock();
-       for (j = 0; j < vsi->num_queue_pairs; j++, i += 4) {
+       for (j = 0; j < vsi->num_queue_pairs; j++) {
                struct i40e_ring *tx_ring = ACCESS_ONCE(vsi->tx_rings[j]);
                struct i40e_ring *rx_ring;
 
@@ -662,14 +662,16 @@ static void i40e_get_ethtool_stats(struct net_device *netdev,
                        data[i] = tx_ring->stats.packets;
                        data[i + 1] = tx_ring->stats.bytes;
                } while (u64_stats_fetch_retry_irq(&tx_ring->syncp, start));
+               i += 2;
 
                /* Rx ring is the 2nd half of the queue pair */
                rx_ring = &tx_ring[1];
                do {
                        start = u64_stats_fetch_begin_irq(&rx_ring->syncp);
-                       data[i + 2] = rx_ring->stats.packets;
-                       data[i + 3] = rx_ring->stats.bytes;
+                       data[i] = rx_ring->stats.packets;
+                       data[i + 1] = rx_ring->stats.bytes;
                } while (u64_stats_fetch_retry_irq(&rx_ring->syncp, start));
+               i += 2;
        }
        rcu_read_unlock();
        if (vsi == pf->vsi[pf->lan_vsi]) {
@@ -1007,14 +1009,13 @@ static int i40e_get_coalesce(struct net_device *netdev,
        ec->rx_max_coalesced_frames_irq = vsi->work_limit;
 
        if (ITR_IS_DYNAMIC(vsi->rx_itr_setting))
-               ec->rx_coalesce_usecs = 1;
-       else
-               ec->rx_coalesce_usecs = vsi->rx_itr_setting;
+               ec->use_adaptive_rx_coalesce = 1;
 
        if (ITR_IS_DYNAMIC(vsi->tx_itr_setting))
-               ec->tx_coalesce_usecs = 1;
-       else
-               ec->tx_coalesce_usecs = vsi->tx_itr_setting;
+               ec->use_adaptive_tx_coalesce = 1;
+
+       ec->rx_coalesce_usecs = vsi->rx_itr_setting & ~I40E_ITR_DYNAMIC;
+       ec->tx_coalesce_usecs = vsi->tx_itr_setting & ~I40E_ITR_DYNAMIC;
 
        return 0;
 }
@@ -1033,37 +1034,27 @@ static int i40e_set_coalesce(struct net_device *netdev,
        if (ec->tx_max_coalesced_frames_irq || ec->rx_max_coalesced_frames_irq)
                vsi->work_limit = ec->tx_max_coalesced_frames_irq;
 
-       switch (ec->rx_coalesce_usecs) {
-       case 0:
-               vsi->rx_itr_setting = 0;
-               break;
-       case 1:
-               vsi->rx_itr_setting = (I40E_ITR_DYNAMIC |
-                                      ITR_REG_TO_USEC(I40E_ITR_RX_DEF));
-               break;
-       default:
-               if ((ec->rx_coalesce_usecs < (I40E_MIN_ITR << 1)) ||
-                   (ec->rx_coalesce_usecs > (I40E_MAX_ITR << 1)))
-                       return -EINVAL;
+       if ((ec->rx_coalesce_usecs >= (I40E_MIN_ITR << 1)) &&
+           (ec->rx_coalesce_usecs <= (I40E_MAX_ITR << 1)))
                vsi->rx_itr_setting = ec->rx_coalesce_usecs;
-               break;
-       }
+       else
+               return -EINVAL;
 
-       switch (ec->tx_coalesce_usecs) {
-       case 0:
-               vsi->tx_itr_setting = 0;
-               break;
-       case 1:
-               vsi->tx_itr_setting = (I40E_ITR_DYNAMIC |
-                                      ITR_REG_TO_USEC(I40E_ITR_TX_DEF));
-               break;
-       default:
-               if ((ec->tx_coalesce_usecs < (I40E_MIN_ITR << 1)) ||
-                   (ec->tx_coalesce_usecs > (I40E_MAX_ITR << 1)))
-                       return -EINVAL;
+       if ((ec->tx_coalesce_usecs >= (I40E_MIN_ITR << 1)) &&
+           (ec->tx_coalesce_usecs <= (I40E_MAX_ITR << 1)))
                vsi->tx_itr_setting = ec->tx_coalesce_usecs;
-               break;
-       }
+       else
+               return -EINVAL;
+
+       if (ec->use_adaptive_rx_coalesce)
+               vsi->rx_itr_setting |= I40E_ITR_DYNAMIC;
+       else
+               vsi->rx_itr_setting &= ~I40E_ITR_DYNAMIC;
+
+       if (ec->use_adaptive_tx_coalesce)
+               vsi->tx_itr_setting |= I40E_ITR_DYNAMIC;
+       else
+               vsi->tx_itr_setting &= ~I40E_ITR_DYNAMIC;
 
        vector = vsi->base_vector;
        for (i = 0; i < vsi->num_q_vectors; i++, vector++) {
@@ -1189,6 +1180,12 @@ static int i40e_get_ethtool_fdir_entry(struct i40e_pf *pf,
                return -EINVAL;
 
        fsp->flow_type = rule->flow_type;
+       if (fsp->flow_type == IP_USER_FLOW) {
+               fsp->h_u.usr_ip4_spec.ip_ver = ETH_RX_NFC_IP4;
+               fsp->h_u.usr_ip4_spec.proto = 0;
+               fsp->m_u.usr_ip4_spec.proto = 0;
+       }
+
        fsp->h_u.tcp_ip4_spec.psrc = rule->src_port;
        fsp->h_u.tcp_ip4_spec.pdst = rule->dst_port;
        fsp->h_u.tcp_ip4_spec.ip4src = rule->src_ip[0];
@@ -1692,5 +1689,5 @@ static const struct ethtool_ops i40e_ethtool_ops = {
 
 void i40e_set_ethtool_ops(struct net_device *netdev)
 {
-       SET_ETHTOOL_OPS(netdev, &i40e_ethtool_ops);
+       netdev->ethtool_ops = &i40e_ethtool_ops;
 }
index d5d98fe2691dd6048f7edba7f6b4847076e5316d..5c341aeb5d530de5318bc8847d4c6762c3f8abc1 100644 (file)
@@ -747,6 +747,7 @@ static struct i40e_context_ele i40e_hmc_rxq_ce_info[] = {
        { I40E_HMC_STORE(i40e_hmc_obj_rxq, tphdata_ena),  1,    195 },
        { I40E_HMC_STORE(i40e_hmc_obj_rxq, tphhead_ena),  1,    196 },
        { I40E_HMC_STORE(i40e_hmc_obj_rxq, lrxqthresh),   3,    198 },
+       { I40E_HMC_STORE(i40e_hmc_obj_rxq, prefena),      1,    201 },
        { 0 }
 };
 
index 341de925a2983181a1cd7d7f18d59996fd128158..eb65fe23c4a70077e31e2546208aaeaecf93224d 100644 (file)
@@ -56,6 +56,7 @@ struct i40e_hmc_obj_rxq {
        u8  tphdata_ena;
        u8  tphhead_ena;
        u8  lrxqthresh;
+       u8  prefena;    /* NOTE: normally must be set to 1 at init */
 };
 
 /* Tx queue context data */
index 861b722c2672e78b0ed0ccbe2cdd2f4428d8f472..e0e5c6a867b196e2ad4852c820c9b6147e0faf9c 100644 (file)
@@ -39,7 +39,7 @@ static const char i40e_driver_string[] =
 
 #define DRV_VERSION_MAJOR 0
 #define DRV_VERSION_MINOR 3
-#define DRV_VERSION_BUILD 36
+#define DRV_VERSION_BUILD 46
 #define DRV_VERSION __stringify(DRV_VERSION_MAJOR) "." \
             __stringify(DRV_VERSION_MINOR) "." \
             __stringify(DRV_VERSION_BUILD)    DRV_KERN
@@ -2312,6 +2312,8 @@ static int i40e_configure_rx_ring(struct i40e_ring *ring)
        rx_ctx.crcstrip = 1;
        rx_ctx.l2tsel = 1;
        rx_ctx.showiv = 1;
+       /* set the prefena field to 1 because the manual says to */
+       rx_ctx.prefena = 1;
 
        /* clear the context in the HMC */
        err = i40e_clear_lan_rx_queue_context(hw, pf_q);
@@ -2897,12 +2899,9 @@ static irqreturn_t i40e_intr(int irq, void *data)
                u32 prttsyn_stat = rd32(hw, I40E_PRTTSYN_STAT_0);
 
                if (prttsyn_stat & I40E_PRTTSYN_STAT_0_TXTIME_MASK) {
-                       ena_mask &= ~I40E_PFINT_ICR0_ENA_TIMESYNC_MASK;
+                       icr0 &= ~I40E_PFINT_ICR0_ENA_TIMESYNC_MASK;
                        i40e_ptp_tx_hwtstamp(pf);
-                       prttsyn_stat &= ~I40E_PRTTSYN_STAT_0_TXTIME_MASK;
                }
-
-               wr32(hw, I40E_PRTTSYN_STAT_0, prttsyn_stat);
        }
 
        /* If a critical error is pending we have no choice but to reset the
@@ -3163,9 +3162,7 @@ static int i40e_vsi_control_tx(struct i40e_vsi *vsi, bool enable)
                        usleep_range(1000, 2000);
                }
                /* Skip if the queue is already in the requested state */
-               if (enable && (tx_reg & I40E_QTX_ENA_QENA_STAT_MASK))
-                       continue;
-               if (!enable && !(tx_reg & I40E_QTX_ENA_QENA_STAT_MASK))
+               if (enable == !!(tx_reg & I40E_QTX_ENA_QENA_STAT_MASK))
                        continue;
 
                /* turn on/off the queue */
@@ -3181,13 +3178,8 @@ static int i40e_vsi_control_tx(struct i40e_vsi *vsi, bool enable)
                /* wait for the change to finish */
                for (j = 0; j < 10; j++) {
                        tx_reg = rd32(hw, I40E_QTX_ENA(pf_q));
-                       if (enable) {
-                               if ((tx_reg & I40E_QTX_ENA_QENA_STAT_MASK))
-                                       break;
-                       } else {
-                               if (!(tx_reg & I40E_QTX_ENA_QENA_STAT_MASK))
-                                       break;
-                       }
+                       if (enable == !!(tx_reg & I40E_QTX_ENA_QENA_STAT_MASK))
+                               break;
 
                        udelay(10);
                }
@@ -3226,15 +3218,9 @@ static int i40e_vsi_control_rx(struct i40e_vsi *vsi, bool enable)
                        usleep_range(1000, 2000);
                }
 
-               if (enable) {
-                       /* is STAT set ? */
-                       if ((rx_reg & I40E_QRX_ENA_QENA_STAT_MASK))
-                               continue;
-               } else {
-                       /* is !STAT set ? */
-                       if (!(rx_reg & I40E_QRX_ENA_QENA_STAT_MASK))
-                               continue;
-               }
+               /* Skip if the queue is already in the requested state */
+               if (enable == !!(rx_reg & I40E_QRX_ENA_QENA_STAT_MASK))
+                       continue;
 
                /* turn on/off the queue */
                if (enable)
@@ -3247,13 +3233,8 @@ static int i40e_vsi_control_rx(struct i40e_vsi *vsi, bool enable)
                for (j = 0; j < 10; j++) {
                        rx_reg = rd32(hw, I40E_QRX_ENA(pf_q));
 
-                       if (enable) {
-                               if ((rx_reg & I40E_QRX_ENA_QENA_STAT_MASK))
-                                       break;
-                       } else {
-                               if (!(rx_reg & I40E_QRX_ENA_QENA_STAT_MASK))
-                                       break;
-                       }
+                       if (enable == !!(rx_reg & I40E_QRX_ENA_QENA_STAT_MASK))
+                               break;
 
                        udelay(10);
                }
@@ -3515,6 +3496,19 @@ static void i40e_napi_disable_all(struct i40e_vsi *vsi)
                napi_disable(&vsi->q_vectors[q_idx]->napi);
 }
 
+/**
+ * i40e_vsi_close - Shut down a VSI
+ * @vsi: the vsi to be quelled
+ **/
+static void i40e_vsi_close(struct i40e_vsi *vsi)
+{
+       if (!test_and_set_bit(__I40E_DOWN, &vsi->state))
+               i40e_down(vsi);
+       i40e_vsi_free_irq(vsi);
+       i40e_vsi_free_tx_resources(vsi);
+       i40e_vsi_free_rx_resources(vsi);
+}
+
 /**
  * i40e_quiesce_vsi - Pause a given VSI
  * @vsi: the VSI being paused
@@ -3528,8 +3522,7 @@ static void i40e_quiesce_vsi(struct i40e_vsi *vsi)
        if (vsi->netdev && netif_running(vsi->netdev)) {
                vsi->netdev->netdev_ops->ndo_stop(vsi->netdev);
        } else {
-               set_bit(__I40E_DOWN, &vsi->state);
-               i40e_down(vsi);
+               i40e_vsi_close(vsi);
        }
 }
 
@@ -3546,7 +3539,7 @@ static void i40e_unquiesce_vsi(struct i40e_vsi *vsi)
        if (vsi->netdev && netif_running(vsi->netdev))
                vsi->netdev->netdev_ops->ndo_open(vsi->netdev);
        else
-               i40e_up(vsi);   /* this clears the DOWN bit */
+               i40e_vsi_open(vsi);   /* this clears the DOWN bit */
 }
 
 /**
@@ -4031,6 +4024,8 @@ static void i40e_dcb_reconfigure(struct i40e_pf *pf)
                                 pf->vsi[v]->seid);
                        /* Will try to configure as many components */
                } else {
+                       /* Re-configure VSI vectors based on updated TC map */
+                       i40e_vsi_map_rings_to_vectors(pf->vsi[v]);
                        if (pf->vsi[v]->netdev)
                                i40e_dcbnl_set_all(pf->vsi[v]);
                }
@@ -4070,6 +4065,9 @@ static int i40e_init_pf_dcb(struct i40e_pf *pf)
                                       DCB_CAP_DCBX_VER_IEEE;
                        pf->flags |= I40E_FLAG_DCB_ENABLED;
                }
+       } else {
+               dev_info(&pf->pdev->dev, "AQ Querying DCB configuration failed: %d\n",
+                        pf->hw.aq.asq_last_status);
        }
 
 out:
@@ -4271,6 +4269,14 @@ static int i40e_open(struct net_device *netdev)
        if (err)
                return err;
 
+       /* configure global TSO hardware offload settings */
+       wr32(&pf->hw, I40E_GLLAN_TSOMSK_F, be32_to_cpu(TCP_FLAG_PSH |
+                                                      TCP_FLAG_FIN) >> 16);
+       wr32(&pf->hw, I40E_GLLAN_TSOMSK_M, be32_to_cpu(TCP_FLAG_PSH |
+                                                      TCP_FLAG_FIN |
+                                                      TCP_FLAG_CWR) >> 16);
+       wr32(&pf->hw, I40E_GLLAN_TSOMSK_L, be32_to_cpu(TCP_FLAG_CWR) >> 16);
+
 #ifdef CONFIG_I40E_VXLAN
        vxlan_get_rx_port(netdev);
 #endif
@@ -4304,24 +4310,32 @@ int i40e_vsi_open(struct i40e_vsi *vsi)
        if (err)
                goto err_setup_rx;
 
-       if (!vsi->netdev) {
-               err = EINVAL;
-               goto err_setup_rx;
-       }
-       snprintf(int_name, sizeof(int_name) - 1, "%s-%s",
-                dev_driver_string(&pf->pdev->dev), vsi->netdev->name);
-       err = i40e_vsi_request_irq(vsi, int_name);
-       if (err)
-               goto err_setup_rx;
+       if (vsi->netdev) {
+               snprintf(int_name, sizeof(int_name) - 1, "%s-%s",
+                        dev_driver_string(&pf->pdev->dev), vsi->netdev->name);
+               err = i40e_vsi_request_irq(vsi, int_name);
+               if (err)
+                       goto err_setup_rx;
 
-       /* Notify the stack of the actual queue counts. */
-       err = netif_set_real_num_tx_queues(vsi->netdev, vsi->num_queue_pairs);
-       if (err)
-               goto err_set_queues;
+               /* Notify the stack of the actual queue counts. */
+               err = netif_set_real_num_tx_queues(vsi->netdev,
+                                                  vsi->num_queue_pairs);
+               if (err)
+                       goto err_set_queues;
 
-       err = netif_set_real_num_rx_queues(vsi->netdev, vsi->num_queue_pairs);
-       if (err)
-               goto err_set_queues;
+               err = netif_set_real_num_rx_queues(vsi->netdev,
+                                                  vsi->num_queue_pairs);
+               if (err)
+                       goto err_set_queues;
+
+       } else if (vsi->type == I40E_VSI_FDIR) {
+               snprintf(int_name, sizeof(int_name) - 1, "%s-fdir",
+                        dev_driver_string(&pf->pdev->dev));
+               err = i40e_vsi_request_irq(vsi, int_name);
+       } else {
+               err = -EINVAL;
+               goto err_setup_rx;
+       }
 
        err = i40e_up_complete(vsi);
        if (err)
@@ -4378,14 +4392,7 @@ static int i40e_close(struct net_device *netdev)
        struct i40e_netdev_priv *np = netdev_priv(netdev);
        struct i40e_vsi *vsi = np->vsi;
 
-       if (test_and_set_bit(__I40E_DOWN, &vsi->state))
-               return 0;
-
-       i40e_down(vsi);
-       i40e_vsi_free_irq(vsi);
-
-       i40e_vsi_free_tx_resources(vsi);
-       i40e_vsi_free_rx_resources(vsi);
+       i40e_vsi_close(vsi);
 
        return 0;
 }
@@ -5221,9 +5228,6 @@ static int i40e_get_capabilities(struct i40e_pf *pf)
                }
        } while (err);
 
-       /* increment MSI-X count because current FW skips one */
-       pf->hw.func_caps.num_msix_vectors++;
-
        if (((pf->hw.aq.fw_maj_ver == 2) && (pf->hw.aq.fw_min_ver < 22)) ||
            (pf->hw.aq.fw_maj_ver < 2)) {
                pf->hw.func_caps.num_msix_vectors++;
@@ -5262,8 +5266,7 @@ static int i40e_vsi_clear(struct i40e_vsi *vsi);
 static void i40e_fdir_sb_setup(struct i40e_pf *pf)
 {
        struct i40e_vsi *vsi;
-       bool new_vsi = false;
-       int err, i;
+       int i;
 
        if (!(pf->flags & I40E_FLAG_FD_SB_ENABLED))
                return;
@@ -5283,47 +5286,12 @@ static void i40e_fdir_sb_setup(struct i40e_pf *pf)
                                     pf->vsi[pf->lan_vsi]->seid, 0);
                if (!vsi) {
                        dev_info(&pf->pdev->dev, "Couldn't create FDir VSI\n");
-                       goto err_vsi;
+                       pf->flags &= ~I40E_FLAG_FD_SB_ENABLED;
+                       return;
                }
-               new_vsi = true;
-       }
-       i40e_vsi_setup_irqhandler(vsi, i40e_fdir_clean_ring);
-
-       err = i40e_vsi_setup_tx_resources(vsi);
-       if (err)
-               goto err_setup_tx;
-       err = i40e_vsi_setup_rx_resources(vsi);
-       if (err)
-               goto err_setup_rx;
-
-       if (new_vsi) {
-               char int_name[IFNAMSIZ + 9];
-               err = i40e_vsi_configure(vsi);
-               if (err)
-                       goto err_setup_rx;
-               snprintf(int_name, sizeof(int_name) - 1, "%s-fdir",
-                        dev_driver_string(&pf->pdev->dev));
-               err = i40e_vsi_request_irq(vsi, int_name);
-               if (err)
-                       goto err_setup_rx;
-               err = i40e_up_complete(vsi);
-               if (err)
-                       goto err_up_complete;
-               clear_bit(__I40E_NEEDS_RESTART, &vsi->state);
        }
 
-       return;
-
-err_up_complete:
-       i40e_down(vsi);
-       i40e_vsi_free_irq(vsi);
-err_setup_rx:
-       i40e_vsi_free_rx_resources(vsi);
-err_setup_tx:
-       i40e_vsi_free_tx_resources(vsi);
-err_vsi:
-       pf->flags &= ~I40E_FLAG_FD_SB_ENABLED;
-       i40e_vsi_clear(vsi);
+       i40e_vsi_setup_irqhandler(vsi, i40e_fdir_clean_ring);
 }
 
 /**
@@ -5637,7 +5605,6 @@ static void i40e_handle_mdd_event(struct i40e_pf *pf)
  **/
 static void i40e_sync_vxlan_filters_subtask(struct i40e_pf *pf)
 {
-       const int vxlan_hdr_qwords = 4;
        struct i40e_hw *hw = &pf->hw;
        i40e_status ret;
        u8 filter_index;
@@ -5655,7 +5622,6 @@ static void i40e_sync_vxlan_filters_subtask(struct i40e_pf *pf)
                        port = pf->vxlan_ports[i];
                        ret = port ?
                              i40e_aq_add_udp_tunnel(hw, ntohs(port),
-                                                    vxlan_hdr_qwords,
                                                     I40E_AQC_TUNNEL_TYPE_VXLAN,
                                                     &filter_index, NULL)
                              : i40e_aq_del_udp_tunnel(hw, i, NULL);
@@ -6402,6 +6368,10 @@ static int i40e_sw_init(struct i40e_pf *pf)
                    I40E_FLAG_MSIX_ENABLED    |
                    I40E_FLAG_RX_1BUF_ENABLED;
 
+       /* Set default ITR */
+       pf->rx_itr_default = I40E_ITR_DYNAMIC | I40E_ITR_RX_DEF;
+       pf->tx_itr_default = I40E_ITR_DYNAMIC | I40E_ITR_TX_DEF;
+
        /* Depending on PF configurations, it is possible that the RSS
         * maximum might end up larger than the available queues
         */
@@ -6644,6 +6614,96 @@ static void i40e_del_vxlan_port(struct net_device *netdev,
 }
 
 #endif
+#ifdef HAVE_FDB_OPS
+#ifdef USE_CONST_DEV_UC_CHAR
+static int i40e_ndo_fdb_add(struct ndmsg *ndm, struct nlattr *tb[],
+                           struct net_device *dev,
+                           const unsigned char *addr,
+                           u16 flags)
+#else
+static int i40e_ndo_fdb_add(struct ndmsg *ndm,
+                           struct net_device *dev,
+                           unsigned char *addr,
+                           u16 flags)
+#endif
+{
+       struct i40e_netdev_priv *np = netdev_priv(dev);
+       struct i40e_pf *pf = np->vsi->back;
+       int err = 0;
+
+       if (!(pf->flags & I40E_FLAG_SRIOV_ENABLED))
+               return -EOPNOTSUPP;
+
+       /* Hardware does not support aging addresses so if a
+        * ndm_state is given only allow permanent addresses
+        */
+       if (ndm->ndm_state && !(ndm->ndm_state & NUD_PERMANENT)) {
+               netdev_info(dev, "FDB only supports static addresses\n");
+               return -EINVAL;
+       }
+
+       if (is_unicast_ether_addr(addr) || is_link_local_ether_addr(addr))
+               err = dev_uc_add_excl(dev, addr);
+       else if (is_multicast_ether_addr(addr))
+               err = dev_mc_add_excl(dev, addr);
+       else
+               err = -EINVAL;
+
+       /* Only return duplicate errors if NLM_F_EXCL is set */
+       if (err == -EEXIST && !(flags & NLM_F_EXCL))
+               err = 0;
+
+       return err;
+}
+
+#ifndef USE_DEFAULT_FDB_DEL_DUMP
+#ifdef USE_CONST_DEV_UC_CHAR
+static int i40e_ndo_fdb_del(struct ndmsg *ndm,
+                           struct net_device *dev,
+                           const unsigned char *addr)
+#else
+static int i40e_ndo_fdb_del(struct ndmsg *ndm,
+                           struct net_device *dev,
+                           unsigned char *addr)
+#endif
+{
+       struct i40e_netdev_priv *np = netdev_priv(dev);
+       struct i40e_pf *pf = np->vsi->back;
+       int err = -EOPNOTSUPP;
+
+       if (ndm->ndm_state & NUD_PERMANENT) {
+               netdev_info(dev, "FDB only supports static addresses\n");
+               return -EINVAL;
+       }
+
+       if (pf->flags & I40E_FLAG_SRIOV_ENABLED) {
+               if (is_unicast_ether_addr(addr))
+                       err = dev_uc_del(dev, addr);
+               else if (is_multicast_ether_addr(addr))
+                       err = dev_mc_del(dev, addr);
+               else
+                       err = -EINVAL;
+       }
+
+       return err;
+}
+
+static int i40e_ndo_fdb_dump(struct sk_buff *skb,
+                            struct netlink_callback *cb,
+                            struct net_device *dev,
+                            int idx)
+{
+       struct i40e_netdev_priv *np = netdev_priv(dev);
+       struct i40e_pf *pf = np->vsi->back;
+
+       if (pf->flags & I40E_FLAG_SRIOV_ENABLED)
+               idx = ndo_dflt_fdb_dump(skb, cb, dev, idx);
+
+       return idx;
+}
+
+#endif /* USE_DEFAULT_FDB_DEL_DUMP */
+#endif /* HAVE_FDB_OPS */
 static const struct net_device_ops i40e_netdev_ops = {
        .ndo_open               = i40e_open,
        .ndo_stop               = i40e_close,
@@ -6671,6 +6731,13 @@ static const struct net_device_ops i40e_netdev_ops = {
        .ndo_add_vxlan_port     = i40e_add_vxlan_port,
        .ndo_del_vxlan_port     = i40e_del_vxlan_port,
 #endif
+#ifdef HAVE_FDB_OPS
+       .ndo_fdb_add            = i40e_ndo_fdb_add,
+#ifndef USE_DEFAULT_FDB_DEL_DUMP
+       .ndo_fdb_del            = i40e_ndo_fdb_del,
+       .ndo_fdb_dump           = i40e_ndo_fdb_dump,
+#endif
+#endif
 };
 
 /**
@@ -6712,12 +6779,15 @@ static int i40e_config_netdev(struct i40e_vsi *vsi)
                           NETIF_F_HW_VLAN_CTAG_FILTER |
                           NETIF_F_IPV6_CSUM           |
                           NETIF_F_TSO                 |
+                          NETIF_F_TSO_ECN             |
                           NETIF_F_TSO6                |
                           NETIF_F_RXCSUM              |
-                          NETIF_F_NTUPLE              |
                           NETIF_F_RXHASH              |
                           0;
 
+       if (!(pf->flags & I40E_FLAG_MFP_ENABLED))
+               netdev->features |= NETIF_F_NTUPLE;
+
        /* copy netdev features into list of user selectable features */
        netdev->hw_features |= netdev->features;
 
@@ -6976,11 +7046,7 @@ int i40e_vsi_release(struct i40e_vsi *vsi)
                                unregister_netdev(vsi->netdev);
                        }
                } else {
-                       if (!test_and_set_bit(__I40E_DOWN, &vsi->state))
-                               i40e_down(vsi);
-                       i40e_vsi_free_irq(vsi);
-                       i40e_vsi_free_tx_resources(vsi);
-                       i40e_vsi_free_rx_resources(vsi);
+                       i40e_vsi_close(vsi);
                }
                i40e_vsi_disable_irq(vsi);
        }
@@ -8084,6 +8150,7 @@ static int i40e_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
        u16 link_status;
        int err = 0;
        u32 len;
+       u32 i;
 
        err = pci_enable_device_mem(pdev);
        if (err)
@@ -8237,7 +8304,7 @@ static int i40e_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
        if (err) {
                dev_info(&pdev->dev, "init_pf_dcb failed: %d\n", err);
                pf->flags &= ~I40E_FLAG_DCB_ENABLED;
-               goto err_init_dcb;
+               /* Continue without DCB enabled */
        }
 #endif /* CONFIG_I40E_DCB */
 
@@ -8273,6 +8340,13 @@ static int i40e_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
                dev_info(&pdev->dev, "setup_pf_switch failed: %d\n", err);
                goto err_vsis;
        }
+       /* if FDIR VSI was set up, start it now */
+       for (i = 0; i < pf->hw.func_caps.num_vsis; i++) {
+               if (pf->vsi[i] && pf->vsi[i]->type == I40E_VSI_FDIR) {
+                       i40e_vsi_open(pf->vsi[i]);
+                       break;
+               }
+       }
 
        /* The main driver is (mostly) up and happy. We need to set this state
         * before setting up the misc vector or we get a race and the vector
@@ -8294,6 +8368,7 @@ static int i40e_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
                }
        }
 
+#ifdef CONFIG_PCI_IOV
        /* prep for VF support */
        if ((pf->flags & I40E_FLAG_SRIOV_ENABLED) &&
            (pf->flags & I40E_FLAG_MSIX_ENABLED) &&
@@ -8316,6 +8391,7 @@ static int i40e_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
                                         err);
                }
        }
+#endif /* CONFIG_PCI_IOV */
 
        pfs_found++;
 
@@ -8326,6 +8402,7 @@ static int i40e_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
        dv.minor_version = DRV_VERSION_MINOR;
        dv.build_version = DRV_VERSION_BUILD;
        dv.subbuild_version = 0;
+       strncpy(dv.driver_string, DRV_VERSION, sizeof(dv.driver_string));
        i40e_aq_send_driver_version(&pf->hw, &dv, NULL);
 
        /* since everything's happy, start the service_task timer */
@@ -8367,9 +8444,6 @@ err_vsis:
 err_switch_setup:
        i40e_reset_interrupt_capability(pf);
        del_timer_sync(&pf->service_timer);
-#ifdef CONFIG_I40E_DCB
-err_init_dcb:
-#endif /* CONFIG_I40E_DCB */
 err_mac_addr:
 err_configure_lan_hmc:
        (void)i40e_shutdown_lan_hmc(hw);
index 262bdf11d221e5a30be53a2f57a09e1865b32e79..81299189a47d3e58b61e75dba58869a85e813f2e 100644 (file)
@@ -160,7 +160,7 @@ static i40e_status i40e_poll_sr_srctl_done_bit(struct i40e_hw *hw)
                udelay(5);
        }
        if (ret_code == I40E_ERR_TIMEOUT)
-               hw_dbg(hw, "Done bit in GLNVM_SRCTL not set");
+               hw_dbg(hw, "Done bit in GLNVM_SRCTL not set\n");
        return ret_code;
 }
 
index 9cd57e617959b1622ec57f8c8f6e2146c9c800a6..d351832bf2359a66b97a20b11b5f447ac1e9ce1c 100644 (file)
@@ -157,8 +157,8 @@ i40e_status i40e_aq_stop_lldp(struct i40e_hw *hw, bool shutdown_agent,
 i40e_status i40e_aq_start_lldp(struct i40e_hw *hw,
                                struct i40e_asq_cmd_details *cmd_details);
 i40e_status i40e_aq_add_udp_tunnel(struct i40e_hw *hw,
-                               u16 udp_port, u8 header_len,
-                               u8 protocol_index, u8 *filter_index,
+                               u16 udp_port, u8 protocol_index,
+                               u8 *filter_index,
                                struct i40e_asq_cmd_details *cmd_details);
 i40e_status i40e_aq_del_udp_tunnel(struct i40e_hw *hw, u8 index,
                                struct i40e_asq_cmd_details *cmd_details);
@@ -167,6 +167,9 @@ i40e_status i40e_aq_delete_element(struct i40e_hw *hw, u16 seid,
 i40e_status i40e_aq_mac_address_write(struct i40e_hw *hw,
                                    u16 flags, u8 *mac_addr,
                                    struct i40e_asq_cmd_details *cmd_details);
+i40e_status i40e_aq_config_vsi_bw_limit(struct i40e_hw *hw,
+                               u16 seid, u16 credit, u8 max_credit,
+                               struct i40e_asq_cmd_details *cmd_details);
 i40e_status i40e_aq_dcb_updated(struct i40e_hw *hw,
                                struct i40e_asq_cmd_details *cmd_details);
 i40e_status i40e_aq_set_hmc_resource_profile(struct i40e_hw *hw,
index e33ec6c842b7035acd8189520a4f410c495270f1..101f439acda6adfd7e57084865f3bd1b16d06362 100644 (file)
@@ -48,7 +48,6 @@
                                        I40E_PRTTSYN_CTL1_TSYNTYPE_SHIFT)
 #define I40E_PRTTSYN_CTL1_TSYNTYPE_V2  (0x2 << \
                                        I40E_PRTTSYN_CTL1_TSYNTYPE_SHIFT)
-#define I40E_PTP_TX_TIMEOUT  (HZ * 15)
 
 /**
  * i40e_ptp_read - Read the PHC time from the device
@@ -216,40 +215,6 @@ static int i40e_ptp_settime(struct ptp_clock_info *ptp,
        return 0;
 }
 
-/**
- * i40e_ptp_tx_work
- * @work: pointer to work struct
- *
- * This work function polls the PRTTSYN_STAT_0.TXTIME bit to determine when a
- * Tx timestamp event has occurred, in order to pass the Tx timestamp value up
- * the stack in the skb.
- */
-static void i40e_ptp_tx_work(struct work_struct *work)
-{
-       struct i40e_pf *pf = container_of(work, struct i40e_pf,
-                                         ptp_tx_work);
-       struct i40e_hw *hw = &pf->hw;
-       u32 prttsyn_stat_0;
-
-       if (!pf->ptp_tx_skb)
-               return;
-
-       if (time_is_before_jiffies(pf->ptp_tx_start +
-                                  I40E_PTP_TX_TIMEOUT)) {
-               dev_kfree_skb_any(pf->ptp_tx_skb);
-               pf->ptp_tx_skb = NULL;
-               pf->tx_hwtstamp_timeouts++;
-               dev_warn(&pf->pdev->dev, "clearing Tx timestamp hang");
-               return;
-       }
-
-       prttsyn_stat_0 = rd32(hw, I40E_PRTTSYN_STAT_0);
-       if (prttsyn_stat_0 & I40E_PRTTSYN_STAT_0_TXTIME_MASK)
-               i40e_ptp_tx_hwtstamp(pf);
-       else
-               schedule_work(&pf->ptp_tx_work);
-}
-
 /**
  * i40e_ptp_enable - Enable/disable ancillary features of the PHC subsystem
  * @ptp: The PTP clock structure
@@ -321,7 +286,7 @@ void i40e_ptp_rx_hang(struct i40e_vsi *vsi)
                pf->last_rx_ptp_check = jiffies;
                pf->rx_hwtstamp_cleared++;
                dev_warn(&vsi->back->pdev->dev,
-                        "%s: clearing Rx timestamp hang",
+                        "%s: clearing Rx timestamp hang\n",
                         __func__);
        }
 }
@@ -608,7 +573,6 @@ void i40e_ptp_init(struct i40e_pf *pf)
                u32 regval;
 
                spin_lock_init(&pf->tmreg_lock);
-               INIT_WORK(&pf->ptp_tx_work, i40e_ptp_tx_work);
 
                dev_info(&pf->pdev->dev, "%s: added PHC on %s\n", __func__,
                         netdev->name);
@@ -647,7 +611,6 @@ void i40e_ptp_stop(struct i40e_pf *pf)
        pf->ptp_tx = false;
        pf->ptp_rx = false;
 
-       cancel_work_sync(&pf->ptp_tx_work);
        if (pf->ptp_tx_skb) {
                dev_kfree_skb_any(pf->ptp_tx_skb);
                pf->ptp_tx_skb = NULL;
index 0f5d96ad281d2400cbe4fbc47e886f9832758e4b..8d0ef445fa4416df82f6eabd7844c6a7d02d52b1 100644 (file)
@@ -24,6 +24,7 @@
  *
  ******************************************************************************/
 
+#include <linux/prefetch.h>
 #include "i40e.h"
 #include "i40e_prototype.h"
 
@@ -418,7 +419,7 @@ int i40e_add_del_fdir(struct i40e_vsi *vsi,
                }
                break;
        default:
-               dev_info(&pf->pdev->dev, "Could not specify spec type %d",
+               dev_info(&pf->pdev->dev, "Could not specify spec type %d\n",
                         input->flow_type);
                ret = -EINVAL;
        }
@@ -478,7 +479,7 @@ static void i40e_fd_handle_status(struct i40e_ring *rx_ring,
                                pf->flags |= I40E_FLAG_FDIR_REQUIRES_REINIT;
                        }
                } else {
-                       dev_info(&pdev->dev, "FD filter programming error");
+                       dev_info(&pdev->dev, "FD filter programming error\n");
                }
        } else if (error ==
                          (0x1 << I40E_RX_PROG_STATUS_DESC_NO_FD_ENTRY_SHIFT)) {
@@ -1713,9 +1714,11 @@ static int i40e_tx_prepare_vlan_flags(struct sk_buff *skb,
                                I40E_TX_FLAGS_VLAN_PRIO_SHIFT;
                if (tx_flags & I40E_TX_FLAGS_SW_VLAN) {
                        struct vlan_ethhdr *vhdr;
-                       if (skb_header_cloned(skb) &&
-                           pskb_expand_head(skb, 0, 0, GFP_ATOMIC))
-                               return -ENOMEM;
+                       int rc;
+
+                       rc = skb_cow_head(skb, 0);
+                       if (rc < 0)
+                               return rc;
                        vhdr = (struct vlan_ethhdr *)skb->data;
                        vhdr->h_vlan_TCI = htons(tx_flags >>
                                                 I40E_TX_FLAGS_VLAN_SHIFT);
@@ -1743,20 +1746,18 @@ static int i40e_tso(struct i40e_ring *tx_ring, struct sk_buff *skb,
                    u64 *cd_type_cmd_tso_mss, u32 *cd_tunneling)
 {
        u32 cd_cmd, cd_tso_len, cd_mss;
+       struct ipv6hdr *ipv6h;
        struct tcphdr *tcph;
        struct iphdr *iph;
        u32 l4len;
        int err;
-       struct ipv6hdr *ipv6h;
 
        if (!skb_is_gso(skb))
                return 0;
 
-       if (skb_header_cloned(skb)) {
-               err = pskb_expand_head(skb, 0, 0, GFP_ATOMIC);
-               if (err)
-                       return err;
-       }
+       err = skb_cow_head(skb, 0);
+       if (err < 0)
+               return err;
 
        if (protocol == htons(ETH_P_IP)) {
                iph = skb->encapsulation ? inner_ip_hdr(skb) : ip_hdr(skb);
@@ -1825,9 +1826,6 @@ static int i40e_tsyn(struct i40e_ring *tx_ring, struct sk_buff *skb,
        *cd_type_cmd_tso_mss |= (u64)I40E_TX_CTX_DESC_TSYN <<
                                I40E_TXD_CTX_QW1_CMD_SHIFT;
 
-       pf->ptp_tx_start = jiffies;
-       schedule_work(&pf->ptp_tx_work);
-
        return 1;
 }
 
index 71a968fe557f33e12f6feb81d416f4f5a55946fb..c4df8bac2db17df4987d64195af5a61e02c93cd6 100644 (file)
@@ -167,6 +167,9 @@ struct i40e_link_status {
        u8 loopback;
        /* is Link Status Event notification to SW enabled */
        bool lse_enable;
+       u16 max_frame_size;
+       bool crc_enable;
+       u8 pacing;
 };
 
 struct i40e_phy_info {
@@ -409,6 +412,7 @@ struct i40e_driver_version {
        u8 minor_version;
        u8 build_version;
        u8 subbuild_version;
+       u8 driver_string[32];
 };
 
 /* RX Descriptors */
index 02c11a7f7d29e80c533b48894e5ee0b5619a9c91..4d219566a04d1c6cd4e480371e2eec6aad63a3e7 100644 (file)
 
 /***********************misc routines*****************************/
 
+/**
+ * i40e_vc_disable_vf
+ * @pf: pointer to the pf info
+ * @vf: pointer to the vf info
+ *
+ * Disable the VF through a SW reset
+ **/
+static inline void i40e_vc_disable_vf(struct i40e_pf *pf, struct i40e_vf *vf)
+{
+       struct i40e_hw *hw = &pf->hw;
+       u32 reg;
+
+       reg = rd32(hw, I40E_VPGEN_VFRTRIG(vf->vf_id));
+       reg |= I40E_VPGEN_VFRTRIG_VFSWR_MASK;
+       wr32(hw, I40E_VPGEN_VFRTRIG(vf->vf_id), reg);
+       i40e_flush(hw);
+}
+
 /**
  * i40e_vc_isvalid_vsi_id
  * @vf: pointer to the vf info
@@ -416,6 +434,15 @@ static int i40e_alloc_vsi_res(struct i40e_vf *vf, enum i40e_vsi_type type)
        if (ret)
                dev_err(&pf->pdev->dev, "Unable to program ucast filters\n");
 
+       /* Set VF bandwidth if specified */
+       if (vf->tx_rate) {
+               ret = i40e_aq_config_vsi_bw_limit(&pf->hw, vsi->seid,
+                                                 vf->tx_rate / 50, 0, NULL);
+               if (ret)
+                       dev_err(&pf->pdev->dev, "Unable to set tx rate, VF %d, error code %d.\n",
+                               vf->vf_id, ret);
+       }
+
 error_alloc_vsi_res:
        return ret;
 }
@@ -815,6 +842,10 @@ void i40e_free_vfs(struct i40e_pf *pf)
        kfree(pf->vf);
        pf->vf = NULL;
 
+       /* This check is for when the driver is unloaded while VFs are
+        * assigned. Setting the number of VFs to 0 through sysfs is caught
+        * before this function ever gets called.
+        */
        if (!i40e_vfs_are_assigned(pf)) {
                pci_disable_sriov(pf->pdev);
                /* Acknowledge VFLR for all VFS. Without this, VFs will fail to
@@ -951,7 +982,12 @@ int i40e_pci_sriov_configure(struct pci_dev *pdev, int num_vfs)
        if (num_vfs)
                return i40e_pci_sriov_enable(pdev, num_vfs);
 
-       i40e_free_vfs(pf);
+       if (!i40e_vfs_are_assigned(pf)) {
+               i40e_free_vfs(pf);
+       } else {
+               dev_warn(&pdev->dev, "Unable to free VFs because some are assigned to VMs.\n");
+               return -EINVAL;
+       }
        return 0;
 }
 
@@ -2022,10 +2058,11 @@ int i40e_ndo_set_vf_mac(struct net_device *netdev, int vf_id, u8 *mac)
        }
 
        /* delete the temporary mac address */
-       i40e_del_filter(vsi, vf->default_lan_addr.addr, 0, true, false);
+       i40e_del_filter(vsi, vf->default_lan_addr.addr, vf->port_vlan_id,
+                       true, false);
 
        /* add the new mac address */
-       f = i40e_add_filter(vsi, mac, 0, true, false);
+       f = i40e_add_filter(vsi, mac, vf->port_vlan_id, true, false);
        if (!f) {
                dev_err(&pf->pdev->dev,
                        "Unable to add VF ucast filter\n");
@@ -2088,18 +2125,28 @@ int i40e_ndo_set_vf_port_vlan(struct net_device *netdev,
                goto error_pvid;
        }
 
-       if (vsi->info.pvid == 0 && i40e_is_vsi_in_vlan(vsi))
+       if (vsi->info.pvid == 0 && i40e_is_vsi_in_vlan(vsi)) {
                dev_err(&pf->pdev->dev,
                        "VF %d has already configured VLAN filters and the administrator is requesting a port VLAN override.\nPlease unload and reload the VF driver for this change to take effect.\n",
                        vf_id);
+               /* Administrator Error - knock the VF offline until he does
+                * the right thing by reconfiguring his network correctly
+                * and then reloading the VF driver.
+                */
+               i40e_vc_disable_vf(pf, vf);
+       }
 
        /* Check for condition where there was already a port VLAN ID
         * filter set and now it is being deleted by setting it to zero.
+        * Additionally check for the condition where there was a port
+        * VLAN but now there is a new and different port VLAN being set.
         * Before deleting all the old VLAN filters we must add new ones
         * with -1 (I40E_VLAN_ANY) or otherwise we're left with all our
         * MAC addresses deleted.
         */
-       if (!(vlan_id || qos) && vsi->info.pvid)
+       if ((!(vlan_id || qos) ||
+           (vlan_id | qos) != le16_to_cpu(vsi->info.pvid)) &&
+           vsi->info.pvid)
                ret = i40e_vsi_add_vlan(vsi, I40E_VLAN_ANY);
 
        if (vsi->info.pvid) {
@@ -2160,7 +2207,61 @@ error_pvid:
  **/
 int i40e_ndo_set_vf_bw(struct net_device *netdev, int vf_id, int tx_rate)
 {
-       return -EOPNOTSUPP;
+       struct i40e_netdev_priv *np = netdev_priv(netdev);
+       struct i40e_pf *pf = np->vsi->back;
+       struct i40e_vsi *vsi;
+       struct i40e_vf *vf;
+       int speed = 0;
+       int ret = 0;
+
+       /* validate the request */
+       if (vf_id >= pf->num_alloc_vfs) {
+               dev_err(&pf->pdev->dev, "Invalid VF Identifier %d.\n", vf_id);
+               ret = -EINVAL;
+               goto error;
+       }
+
+       vf = &(pf->vf[vf_id]);
+       vsi = pf->vsi[vf->lan_vsi_index];
+       if (!test_bit(I40E_VF_STAT_INIT, &vf->vf_states)) {
+               dev_err(&pf->pdev->dev, "Uninitialized VF %d.\n", vf_id);
+               ret = -EINVAL;
+               goto error;
+       }
+
+       switch (pf->hw.phy.link_info.link_speed) {
+       case I40E_LINK_SPEED_40GB:
+               speed = 40000;
+               break;
+       case I40E_LINK_SPEED_10GB:
+               speed = 10000;
+               break;
+       case I40E_LINK_SPEED_1GB:
+               speed = 1000;
+               break;
+       default:
+               break;
+       }
+
+       if (tx_rate > speed) {
+               dev_err(&pf->pdev->dev, "Invalid tx rate %d specified for vf %d.",
+                       tx_rate, vf->vf_id);
+               ret = -EINVAL;
+               goto error;
+       }
+
+       /* Tx rate credits are in values of 50Mbps, 0 is disabled*/
+       ret = i40e_aq_config_vsi_bw_limit(&pf->hw, vsi->seid, tx_rate / 50, 0,
+                                         NULL);
+       if (ret) {
+               dev_err(&pf->pdev->dev, "Unable to set tx rate, error code %d.\n",
+                       ret);
+               ret = -EIO;
+               goto error;
+       }
+       vf->tx_rate = tx_rate;
+error:
+       return ret;
 }
 
 /**
@@ -2200,10 +2301,17 @@ int i40e_ndo_get_vf_config(struct net_device *netdev,
 
        memcpy(&ivi->mac, vf->default_lan_addr.addr, ETH_ALEN);
 
-       ivi->tx_rate = 0;
+       ivi->tx_rate = vf->tx_rate;
        ivi->vlan = le16_to_cpu(vsi->info.pvid) & I40E_VLAN_MASK;
        ivi->qos = (le16_to_cpu(vsi->info.pvid) & I40E_PRIORITY_MASK) >>
                   I40E_VLAN_PRIORITY_SHIFT;
+       if (vf->link_forced == false)
+               ivi->linkstate = IFLA_VF_LINK_STATE_AUTO;
+       else if (vf->link_up == true)
+               ivi->linkstate = IFLA_VF_LINK_STATE_ENABLE;
+       else
+               ivi->linkstate = IFLA_VF_LINK_STATE_DISABLE;
+
        ret = 0;
 
 error_param:
index 389c47f396d5261d708228f48d8e1bf8ed90e4c5..ba3d1f8414beabdd1b6a0e61a524978f77831c42 100644 (file)
@@ -98,6 +98,7 @@ struct i40e_vf {
 
        unsigned long vf_caps;  /* vf's adv. capabilities */
        unsigned long vf_states;        /* vf's runtime states */
+       unsigned int tx_rate;   /* Tx bandwidth limit in Mbps */
        bool link_forced;
        bool link_up;           /* only valid if vf link is forced */
 };
index e09be37a07a8384f41731cfd9e7a744c646e7216..3a423836a565294aa82dadbeb93e4d45619ccd86 100644 (file)
@@ -1,7 +1,7 @@
 ################################################################################
 #
 # Intel Ethernet Controller XL710 Family Linux Virtual Function Driver
-# Copyright(c) 2013 Intel Corporation.
+# Copyright(c) 2013 - 2014 Intel Corporation.
 #
 # This program is free software; you can redistribute it and/or modify it
 # under the terms and conditions of the GNU General Public License,
@@ -12,6 +12,9 @@
 # FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 # more details.
 #
+# You should have received a copy of the GNU General Public License along
+# with this program.  If not, see <http://www.gnu.org/licenses/>.
+#
 # The full GNU General Public License is included in this distribution in
 # the file called "COPYING".
 #
index 5470ce95936ed483abc547f1ef12b1ef30a34721..68b4aacd43f534b941cec1127f063eb5a2415996 100644 (file)
@@ -1,7 +1,7 @@
 /*******************************************************************************
  *
  * Intel Ethernet Controller XL710 Family Linux Virtual Function Driver
- * Copyright(c) 2013 Intel Corporation.
+ * Copyright(c) 2013 - 2014 Intel Corporation.
  *
  * This program is free software; you can redistribute it and/or modify it
  * under the terms and conditions of the GNU General Public License,
@@ -12,6 +12,9 @@
  * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
  * more details.
  *
+ * You should have received a copy of the GNU General Public License along
+ * with this program.  If not, see <http://www.gnu.org/licenses/>.
+ *
  * The full GNU General Public License is included in this distribution in
  * the file called "COPYING".
  *
 #include "i40e_adminq.h"
 #include "i40e_prototype.h"
 
+/**
+ * i40e_is_nvm_update_op - return true if this is an NVM update operation
+ * @desc: API request descriptor
+ **/
+static inline bool i40e_is_nvm_update_op(struct i40e_aq_desc *desc)
+{
+       return (desc->opcode == i40e_aqc_opc_nvm_erase) ||
+              (desc->opcode == i40e_aqc_opc_nvm_update);
+}
+
 /**
  *  i40e_adminq_init_regs - Initialize AdminQ registers
  *  @hw: pointer to the hardware structure
@@ -659,6 +672,12 @@ i40e_status i40evf_asq_send_command(struct i40e_hw *hw,
                goto asq_send_command_exit;
        }
 
+       if (i40e_is_nvm_update_op(desc) && hw->aq.nvm_busy) {
+               i40e_debug(hw, I40E_DEBUG_AQ_MESSAGE, "AQTX: NVM busy.\n");
+               status = I40E_ERR_NVM;
+               goto asq_send_command_exit;
+       }
+
        details = I40E_ADMINQ_DETAILS(hw->aq.asq, hw->aq.asq.next_to_use);
        if (cmd_details) {
                *details = *cmd_details;
@@ -786,6 +805,9 @@ i40e_status i40evf_asq_send_command(struct i40e_hw *hw,
                hw->aq.asq_last_status = (enum i40e_admin_queue_err)retval;
        }
 
+       if (i40e_is_nvm_update_op(desc))
+               hw->aq.nvm_busy = true;
+
        /* update the error if time out occurred */
        if ((!cmd_completed) &&
            (!details->async && !details->postpone)) {
@@ -880,6 +902,9 @@ i40e_status i40evf_clean_arq_element(struct i40e_hw *hw,
                               e->msg_size);
        }
 
+       if (i40e_is_nvm_update_op(&e->desc))
+               hw->aq.nvm_busy = false;
+
        /* Restore the original datalen and buffer address in the desc,
         * FW updates datalen to indicate the event message
         * size
index 8f72c31d95cc85adafac60190967de3f607b4bd6..e3472c62e1554194740a9233aadedaa5f13077e3 100644 (file)
@@ -1,7 +1,7 @@
 /*******************************************************************************
  *
  * Intel Ethernet Controller XL710 Family Linux Virtual Function Driver
- * Copyright(c) 2013 Intel Corporation.
+ * Copyright(c) 2013 - 2014 Intel Corporation.
  *
  * This program is free software; you can redistribute it and/or modify it
  * under the terms and conditions of the GNU General Public License,
@@ -12,6 +12,9 @@
  * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
  * more details.
  *
+ * You should have received a copy of the GNU General Public License along
+ * with this program.  If not, see <http://www.gnu.org/licenses/>.
+ *
  * The full GNU General Public License is included in this distribution in
  * the file called "COPYING".
  *
@@ -87,6 +90,7 @@ struct i40e_adminq_info {
        u16 fw_min_ver;                 /* firmware minor version */
        u16 api_maj_ver;                /* api major version */
        u16 api_min_ver;                /* api minor version */
+       bool nvm_busy;
 
        struct mutex asq_mutex; /* Send queue lock */
        struct mutex arq_mutex; /* Receive queue lock */
index 97662b6bd98a3e5badd0660bfcc9c932500ae124..89d9209ff2bd20be69c3431f3ff6cb77308d8ca2 100644 (file)
@@ -1,7 +1,7 @@
 /*******************************************************************************
  *
  * Intel Ethernet Controller XL710 Family Linux Virtual Function Driver
- * Copyright(c) 2013 Intel Corporation.
+ * Copyright(c) 2013 - 2014 Intel Corporation.
  *
  * This program is free software; you can redistribute it and/or modify it
  * under the terms and conditions of the GNU General Public License,
@@ -12,6 +12,9 @@
  * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
  * more details.
  *
+ * You should have received a copy of the GNU General Public License along
+ * with this program.  If not, see <http://www.gnu.org/licenses/>.
+ *
  * The full GNU General Public License is included in this distribution in
  * the file called "COPYING".
  *
@@ -180,9 +183,6 @@ enum i40e_admin_queue_opc {
        i40e_aqc_opc_add_mirror_rule    = 0x0260,
        i40e_aqc_opc_delete_mirror_rule = 0x0261,
 
-       i40e_aqc_opc_set_storm_control_config = 0x0280,
-       i40e_aqc_opc_get_storm_control_config = 0x0281,
-
        /* DCB commands */
        i40e_aqc_opc_dcb_ignore_pfc = 0x0301,
        i40e_aqc_opc_dcb_updated    = 0x0302,
@@ -205,6 +205,7 @@ enum i40e_admin_queue_opc {
        i40e_aqc_opc_query_switching_comp_bw_config        = 0x041A,
        i40e_aqc_opc_suspend_port_tx                       = 0x041B,
        i40e_aqc_opc_resume_port_tx                        = 0x041C,
+       i40e_aqc_opc_configure_partition_bw                = 0x041D,
 
        /* hmc */
        i40e_aqc_opc_query_hmc_resource_profile = 0x0500,
@@ -678,7 +679,6 @@ struct i40e_aqc_add_get_update_vsi {
 #define I40E_AQ_VSI_TYPE_PF             0x2
 #define I40E_AQ_VSI_TYPE_EMP_MNG        0x3
 #define I40E_AQ_VSI_FLAG_CASCADED_PV    0x4
-#define I40E_AQ_VSI_FLAG_CLOUD_VSI      0x8
        __le32 addr_high;
        __le32 addr_low;
 };
@@ -1040,7 +1040,9 @@ struct i40e_aqc_set_vsi_promiscuous_modes {
 #define I40E_AQC_SET_VSI_PROMISC_VLAN        0x10
        __le16 seid;
 #define I40E_AQC_VSI_PROM_CMD_SEID_MASK      0x3FF
-       u8     reserved[10];
+       __le16 vlan_tag;
+#define I40E_AQC_SET_VSI_VLAN_VALID          0x8000
+       u8     reserved[8];
 };
 
 I40E_CHECK_CMD_LENGTH(i40e_aqc_set_vsi_promiscuous_modes);
@@ -1289,27 +1291,6 @@ struct i40e_aqc_add_delete_mirror_rule_completion {
 
 I40E_CHECK_CMD_LENGTH(i40e_aqc_add_delete_mirror_rule_completion);
 
-/* Set Storm Control Configuration (direct 0x0280)
- * Get Storm Control Configuration (direct 0x0281)
- *    the command and response use the same descriptor structure
- */
-struct i40e_aqc_set_get_storm_control_config {
-       __le32 broadcast_threshold;
-       __le32 multicast_threshold;
-       __le32 control_flags;
-#define I40E_AQC_STORM_CONTROL_MDIPW            0x01
-#define I40E_AQC_STORM_CONTROL_MDICW            0x02
-#define I40E_AQC_STORM_CONTROL_BDIPW            0x04
-#define I40E_AQC_STORM_CONTROL_BDICW            0x08
-#define I40E_AQC_STORM_CONTROL_BIDU             0x10
-#define I40E_AQC_STORM_CONTROL_INTERVAL_SHIFT   8
-#define I40E_AQC_STORM_CONTROL_INTERVAL_MASK    (0x3FF << \
-                                       I40E_AQC_STORM_CONTROL_INTERVAL_SHIFT)
-       u8     reserved[4];
-};
-
-I40E_CHECK_CMD_LENGTH(i40e_aqc_set_get_storm_control_config);
-
 /* DCB 0x03xx*/
 
 /* PFC Ignore (direct 0x0301)
@@ -1499,6 +1480,15 @@ struct i40e_aqc_query_switching_comp_bw_config_resp {
  * (direct 0x041B and 0x041C) uses the generic SEID struct
  */
 
+/* Configure partition BW
+ * (indirect 0x041D)
+ */
+struct i40e_aqc_configure_partition_bw_data {
+       __le16 pf_valid_bits;
+       u8     min_bw[16];      /* guaranteed bandwidth */
+       u8     max_bw[16];      /* bandwidth limit */
+};
+
 /* Get and set the active HMC resource profile and status.
  * (direct 0x0500) and (direct 0x0501)
  */
@@ -1583,11 +1573,8 @@ struct i40e_aq_get_phy_abilities_resp {
 #define I40E_AQ_PHY_FLAG_PAUSE_TX         0x01
 #define I40E_AQ_PHY_FLAG_PAUSE_RX         0x02
 #define I40E_AQ_PHY_FLAG_LOW_POWER        0x04
-#define I40E_AQ_PHY_FLAG_AN_SHIFT         3
-#define I40E_AQ_PHY_FLAG_AN_MASK          (0x3 << I40E_AQ_PHY_FLAG_AN_SHIFT)
-#define I40E_AQ_PHY_FLAG_AN_OFF           0x00 /* link forced on */
-#define I40E_AQ_PHY_FLAG_AN_OFF_LINK_DOWN 0x01
-#define I40E_AQ_PHY_FLAG_AN_ON            0x02
+#define I40E_AQ_PHY_LINK_ENABLED                 0x08
+#define I40E_AQ_PHY_AN_ENABLED                   0x10
 #define I40E_AQ_PHY_FLAG_MODULE_QUAL      0x20
        __le16 eee_capability;
 #define I40E_AQ_EEE_100BASE_TX       0x0002
@@ -1948,19 +1935,12 @@ I40E_CHECK_CMD_LENGTH(i40e_aqc_lldp_start);
 /* Add Udp Tunnel command and completion (direct 0x0B00) */
 struct i40e_aqc_add_udp_tunnel {
        __le16 udp_port;
-       u8     header_len; /* in DWords, 1 to 15 */
+       u8     reserved0[3];
        u8     protocol_type;
-#define I40E_AQC_TUNNEL_TYPE_TEREDO    0x0
-#define I40E_AQC_TUNNEL_TYPE_VXLAN     0x2
-#define I40E_AQC_TUNNEL_TYPE_NGE       0x3
-       u8     variable_udp_length;
-#define I40E_AQC_TUNNEL_FIXED_UDP_LENGTH       0x0
-#define I40E_AQC_TUNNEL_VARIABLE_UDP_LENGTH    0x1
-       u8              udp_key_index;
-#define I40E_AQC_TUNNEL_KEY_INDEX_VXLAN                        0x0
-#define I40E_AQC_TUNNEL_KEY_INDEX_NGE                  0x1
-#define I40E_AQC_TUNNEL_KEY_INDEX_PROPRIETARY_UDP      0x2
-       u8              reserved[10];
+#define I40E_AQC_TUNNEL_TYPE_VXLAN     0x00
+#define I40E_AQC_TUNNEL_TYPE_NGE       0x01
+#define I40E_AQC_TUNNEL_TYPE_TEREDO    0x10
+       u8     reserved1[10];
 };
 
 I40E_CHECK_CMD_LENGTH(i40e_aqc_add_udp_tunnel);
index d8654fb9e525a82881c88cf882d91c716577ec0f..8e6a6dd9212bb0812f42378ad208ccd451e43ee4 100644 (file)
@@ -1,7 +1,7 @@
 /*******************************************************************************
  *
  * Intel Ethernet Controller XL710 Family Linux Virtual Function Driver
- * Copyright(c) 2013 Intel Corporation.
+ * Copyright(c) 2013 - 2014 Intel Corporation.
  *
  * This program is free software; you can redistribute it and/or modify it
  * under the terms and conditions of the GNU General Public License,
@@ -12,6 +12,9 @@
  * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
  * more details.
  *
+ * You should have received a copy of the GNU General Public License along
+ * with this program.  If not, see <http://www.gnu.org/licenses/>.
+ *
  * The full GNU General Public License is included in this distribution in
  * the file called "COPYING".
  *
index ae084378faabff7669794cd0f47a9072f1698243..ac660963b8b7099f61dda784989b797e338e1bea 100644 (file)
@@ -1,7 +1,7 @@
 /*******************************************************************************
  *
  * Intel Ethernet Controller XL710 Family Linux Virtual Function Driver
- * Copyright(c) 2013 Intel Corporation.
+ * Copyright(c) 2013 - 2014 Intel Corporation.
  *
  * This program is free software; you can redistribute it and/or modify it
  * under the terms and conditions of the GNU General Public License,
@@ -12,6 +12,9 @@
  * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
  * more details.
  *
+ * You should have received a copy of the GNU General Public License along
+ * with this program.  If not, see <http://www.gnu.org/licenses/>.
+ *
  * The full GNU General Public License is included in this distribution in
  * the file called "COPYING".
  *
index cb97b3eed440ff764c1a9dbf705e45bc9e308b2f..9d906514fc3d3474392102ea702cd40a10f027cc 100644 (file)
@@ -1,7 +1,7 @@
 /*******************************************************************************
  *
  * Intel Ethernet Controller XL710 Family Linux Virtual Function Driver
- * Copyright(c) 2013 Intel Corporation.
+ * Copyright(c) 2013 - 2014 Intel Corporation.
  *
  * This program is free software; you can redistribute it and/or modify it
  * under the terms and conditions of the GNU General Public License,
@@ -12,6 +12,9 @@
  * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
  * more details.
  *
+ * You should have received a copy of the GNU General Public License along
+ * with this program.  If not, see <http://www.gnu.org/licenses/>.
+ *
  * The full GNU General Public License is included in this distribution in
  * the file called "COPYING".
  *
index 17e42ca26d0ba045fc51a0efd1ec0b1af0fffd71..d6f762241537804be777b97ad5f29616f990058f 100644 (file)
@@ -1,7 +1,7 @@
 /*******************************************************************************
  *
  * Intel Ethernet Controller XL710 Family Linux Virtual Function Driver
- * Copyright(c) 2013 Intel Corporation.
+ * Copyright(c) 2013 - 2014 Intel Corporation.
  *
  * This program is free software; you can redistribute it and/or modify it
  * under the terms and conditions of the GNU General Public License,
@@ -12,6 +12,9 @@
  * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
  * more details.
  *
+ * You should have received a copy of the GNU General Public License along
+ * with this program.  If not, see <http://www.gnu.org/licenses/>.
+ *
  * The full GNU General Public License is included in this distribution in
  * the file called "COPYING".
  *
@@ -53,6 +56,7 @@ struct i40e_hmc_obj_rxq {
        u8  tphdata_ena;
        u8  tphhead_ena;
        u8  lrxqthresh;
+       u8  prefena;    /* NOTE: normally must be set to 1 at init */
 };
 
 /* Tx queue context data */
index 622f373b745d59092355131aa20f5f5c57d486c8..21a91b14bf819365bfea82276324b14da6ef403c 100644 (file)
@@ -1,7 +1,7 @@
 /*******************************************************************************
  *
  * Intel Ethernet Controller XL710 Family Linux Virtual Function Driver
- * Copyright(c) 2013 Intel Corporation.
+ * Copyright(c) 2013 - 2014 Intel Corporation.
  *
  * This program is free software; you can redistribute it and/or modify it
  * under the terms and conditions of the GNU General Public License,
@@ -12,6 +12,9 @@
  * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
  * more details.
  *
+ * You should have received a copy of the GNU General Public License along
+ * with this program.  If not, see <http://www.gnu.org/licenses/>.
+ *
  * The full GNU General Public License is included in this distribution in
  * the file called "COPYING".
  *
index 97ab8c2b76f8f0e6cf1c6c485eaeafee0e42a5a1..849edcc2e398f7bfdaaea5eaf078ba714977ca5f 100644 (file)
@@ -1,7 +1,7 @@
 /*******************************************************************************
  *
  * Intel Ethernet Controller XL710 Family Linux Virtual Function Driver
- * Copyright(c) 2013 Intel Corporation.
+ * Copyright(c) 2013 - 2014 Intel Corporation.
  *
  * This program is free software; you can redistribute it and/or modify it
  * under the terms and conditions of the GNU General Public License,
@@ -12,6 +12,9 @@
  * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
  * more details.
  *
+ * You should have received a copy of the GNU General Public License along
+ * with this program.  If not, see <http://www.gnu.org/licenses/>.
+ *
  * The full GNU General Public License is included in this distribution in
  * the file called "COPYING".
  *
index 30af953cf106a4afc6aa9e0939d24adce8d68418..aa4a92e3b1255132b7b32986a9011bc3d4011af3 100644 (file)
@@ -1,7 +1,7 @@
 /*******************************************************************************
  *
  * Intel Ethernet Controller XL710 Family Linux Virtual Function Driver
- * Copyright(c) 2013 Intel Corporation.
+ * Copyright(c) 2013 - 2014 Intel Corporation.
  *
  * This program is free software; you can redistribute it and/or modify it
  * under the terms and conditions of the GNU General Public License,
@@ -12,6 +12,9 @@
  * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
  * more details.
  *
+ * You should have received a copy of the GNU General Public License along
+ * with this program.  If not, see <http://www.gnu.org/licenses/>.
+ *
  * The full GNU General Public License is included in this distribution in
  * the file called "COPYING".
  *
index 7c08cc2e339b89f05d79d37dc5e46ba9921cb17b..7fa7a41915c1acce92ce1b8d46cc22d7043f96c2 100644 (file)
@@ -1,7 +1,7 @@
 /*******************************************************************************
  *
  * Intel Ethernet Controller XL710 Family Linux Virtual Function Driver
- * Copyright(c) 2013 Intel Corporation.
+ * Copyright(c) 2013 - 2014 Intel Corporation.
  *
  * This program is free software; you can redistribute it and/or modify it
  * under the terms and conditions of the GNU General Public License,
@@ -12,6 +12,9 @@
  * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
  * more details.
  *
+ * You should have received a copy of the GNU General Public License along
+ * with this program.  If not, see <http://www.gnu.org/licenses/>.
+ *
  * The full GNU General Public License is included in this distribution in
  * the file called "COPYING".
  *
index b9f50f40abe18b78cf0c3ff580ded29666491d92..82d6844245b59a6db0e03b0cccc188dffad3cbf8 100644 (file)
@@ -12,6 +12,9 @@
  * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
  * more details.
  *
+ * You should have received a copy of the GNU General Public License along
+ * with this program.  If not, see <http://www.gnu.org/licenses/>.
+ *
  * The full GNU General Public License is included in this distribution in
  * the file called "COPYING".
  *
index 10bf49e18d7f6c0ed8d1af47d5d30544c5a8951e..d0119d0a9fcfc73f658e9bf13bf6bbbc711f3e29 100644 (file)
@@ -1,7 +1,7 @@
 /*******************************************************************************
  *
  * Intel Ethernet Controller XL710 Family Linux Virtual Function Driver
- * Copyright(c) 2013 Intel Corporation.
+ * Copyright(c) 2013 - 2014 Intel Corporation.
  *
  * This program is free software; you can redistribute it and/or modify it
  * under the terms and conditions of the GNU General Public License,
@@ -12,6 +12,9 @@
  * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
  * more details.
  *
+ * You should have received a copy of the GNU General Public License along
+ * with this program.  If not, see <http://www.gnu.org/licenses/>.
+ *
  * The full GNU General Public License is included in this distribution in
  * the file called "COPYING".
  *
index 4673b3381eddaa1672edca1f60b85e89d33ab247..fb5371ad1cb9383b4cbbc704ede9314074bb4067 100644 (file)
@@ -12,6 +12,9 @@
  * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
  * more details.
  *
+ * You should have received a copy of the GNU General Public License along
+ * with this program.  If not, see <http://www.gnu.org/licenses/>.
+ *
  * The full GNU General Public License is included in this distribution in
  * the file called "COPYING".
  *
@@ -101,15 +104,6 @@ enum i40e_debug_mask {
        I40E_DEBUG_ALL                  = 0xFFFFFFFF
 };
 
-/* PCI Bus Info */
-#define I40E_PCI_LINK_WIDTH_1          0x10
-#define I40E_PCI_LINK_WIDTH_2          0x20
-#define I40E_PCI_LINK_WIDTH_4          0x40
-#define I40E_PCI_LINK_WIDTH_8          0x80
-#define I40E_PCI_LINK_SPEED_2500       0x1
-#define I40E_PCI_LINK_SPEED_5000       0x2
-#define I40E_PCI_LINK_SPEED_8000       0x3
-
 /* These are structs for managing the hardware information and the operations.
  * The structures of function pointers are filled out at init time when we
  * know for sure exactly which hardware we're working with.  This gives us the
@@ -173,6 +167,9 @@ struct i40e_link_status {
        u8 loopback;
        /* is Link Status Event notification to SW enabled */
        bool lse_enable;
+       u16 max_frame_size;
+       bool crc_enable;
+       u8 pacing;
 };
 
 struct i40e_phy_info {
@@ -415,6 +412,7 @@ struct i40e_driver_version {
        u8 minor_version;
        u8 build_version;
        u8 subbuild_version;
+       u8 driver_string[32];
 };
 
 /* RX Descriptors */
index ccf45d04b7ef88e74e99d5ad844ad10df8d9b4f8..1ef5b31ece909766dbc8ec3d5fee43aea5377893 100644 (file)
@@ -1,7 +1,7 @@
 /*******************************************************************************
  *
  * Intel Ethernet Controller XL710 Family Linux Virtual Function Driver
- * Copyright(c) 2013 Intel Corporation.
+ * Copyright(c) 2013 - 2014 Intel Corporation.
  *
  * This program is free software; you can redistribute it and/or modify it
  * under the terms and conditions of the GNU General Public License,
@@ -12,6 +12,9 @@
  * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
  * more details.
  *
+ * You should have received a copy of the GNU General Public License along
+ * with this program.  If not, see <http://www.gnu.org/licenses/>.
+ *
  * The full GNU General Public License is included in this distribution in
  * the file called "COPYING".
  *
index 807807d6238738c0e111739e9dc96d1f2200d77a..2913bc3332a1b1c23bf37d685b608ded39343c29 100644 (file)
@@ -12,6 +12,9 @@
  * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
  * more details.
  *
+ * You should have received a copy of the GNU General Public License along
+ * with this program.  If not, see <http://www.gnu.org/licenses/>.
+ *
  * The full GNU General Public License is included in this distribution in
  * the file called "COPYING".
  *
index 8b0db1ce179c5447ce83240098e6c076d6782ed6..df4dcfd364d868608d8782978e3b8f621a902e89 100644 (file)
@@ -12,6 +12,9 @@
  * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
  * more details.
  *
+ * You should have received a copy of the GNU General Public License along
+ * with this program.  If not, see <http://www.gnu.org/licenses/>.
+ *
  * The full GNU General Public License is included in this distribution in
  * the file called "COPYING".
  *
@@ -56,10 +59,12 @@ static const struct i40evf_stats i40evf_gstrings_stats[] = {
 };
 
 #define I40EVF_GLOBAL_STATS_LEN ARRAY_SIZE(i40evf_gstrings_stats)
-#define I40EVF_QUEUE_STATS_LEN \
+#define I40EVF_QUEUE_STATS_LEN(_dev) \
        (((struct i40evf_adapter *) \
-               netdev_priv(netdev))->vsi_res->num_queue_pairs * 4)
-#define I40EVF_STATS_LEN (I40EVF_GLOBAL_STATS_LEN + I40EVF_QUEUE_STATS_LEN)
+               netdev_priv(_dev))->vsi_res->num_queue_pairs \
+                 * 2 * (sizeof(struct i40e_queue_stats) / sizeof(u64)))
+#define I40EVF_STATS_LEN(_dev) \
+       (I40EVF_GLOBAL_STATS_LEN + I40EVF_QUEUE_STATS_LEN(_dev))
 
 /**
  * i40evf_get_settings - Get Link Speed and Duplex settings
@@ -75,7 +80,7 @@ static int i40evf_get_settings(struct net_device *netdev,
        /* In the future the VF will be able to query the PF for
         * some information - for now use a dummy value
         */
-       ecmd->supported = SUPPORTED_10000baseT_Full;
+       ecmd->supported = 0;
        ecmd->autoneg = AUTONEG_DISABLE;
        ecmd->transceiver = XCVR_DUMMY1;
        ecmd->port = PORT_NONE;
@@ -94,9 +99,9 @@ static int i40evf_get_settings(struct net_device *netdev,
 static int i40evf_get_sset_count(struct net_device *netdev, int sset)
 {
        if (sset == ETH_SS_STATS)
-               return I40EVF_STATS_LEN;
+               return I40EVF_STATS_LEN(netdev);
        else
-               return -ENOTSUPP;
+               return -EINVAL;
 }
 
 /**
@@ -290,14 +295,13 @@ static int i40evf_get_coalesce(struct net_device *netdev,
        ec->rx_max_coalesced_frames = vsi->work_limit;
 
        if (ITR_IS_DYNAMIC(vsi->rx_itr_setting))
-               ec->rx_coalesce_usecs = 1;
-       else
-               ec->rx_coalesce_usecs = vsi->rx_itr_setting;
+               ec->use_adaptive_rx_coalesce = 1;
 
        if (ITR_IS_DYNAMIC(vsi->tx_itr_setting))
-               ec->tx_coalesce_usecs = 1;
-       else
-               ec->tx_coalesce_usecs = vsi->tx_itr_setting;
+               ec->use_adaptive_tx_coalesce = 1;
+
+       ec->rx_coalesce_usecs = vsi->rx_itr_setting & ~I40E_ITR_DYNAMIC;
+       ec->tx_coalesce_usecs = vsi->tx_itr_setting & ~I40E_ITR_DYNAMIC;
 
        return 0;
 }
@@ -318,54 +322,358 @@ static int i40evf_set_coalesce(struct net_device *netdev,
        struct i40e_q_vector *q_vector;
        int i;
 
-       if (ec->tx_max_coalesced_frames || ec->rx_max_coalesced_frames)
-               vsi->work_limit = ec->tx_max_coalesced_frames;
+       if (ec->tx_max_coalesced_frames_irq || ec->rx_max_coalesced_frames_irq)
+               vsi->work_limit = ec->tx_max_coalesced_frames_irq;
+
+       if ((ec->rx_coalesce_usecs >= (I40E_MIN_ITR << 1)) &&
+           (ec->rx_coalesce_usecs <= (I40E_MAX_ITR << 1)))
+               vsi->rx_itr_setting = ec->rx_coalesce_usecs;
+
+       else
+               return -EINVAL;
+
+       if ((ec->tx_coalesce_usecs >= (I40E_MIN_ITR << 1)) &&
+           (ec->tx_coalesce_usecs <= (I40E_MAX_ITR << 1)))
+               vsi->tx_itr_setting = ec->tx_coalesce_usecs;
+       else if (ec->use_adaptive_tx_coalesce)
+               vsi->tx_itr_setting = (I40E_ITR_DYNAMIC |
+                                      ITR_REG_TO_USEC(I40E_ITR_RX_DEF));
+       else
+               return -EINVAL;
+
+       if (ec->use_adaptive_rx_coalesce)
+               vsi->rx_itr_setting |= I40E_ITR_DYNAMIC;
+       else
+               vsi->rx_itr_setting &= ~I40E_ITR_DYNAMIC;
+
+       if (ec->use_adaptive_tx_coalesce)
+               vsi->tx_itr_setting |= I40E_ITR_DYNAMIC;
+       else
+               vsi->tx_itr_setting &= ~I40E_ITR_DYNAMIC;
+
+       for (i = 0; i < adapter->num_msix_vectors - NONQ_VECS; i++) {
+               q_vector = adapter->q_vector[i];
+               q_vector->rx.itr = ITR_TO_REG(vsi->rx_itr_setting);
+               wr32(hw, I40E_VFINT_ITRN1(0, i), q_vector->rx.itr);
+               q_vector->tx.itr = ITR_TO_REG(vsi->tx_itr_setting);
+               wr32(hw, I40E_VFINT_ITRN1(1, i), q_vector->tx.itr);
+               i40e_flush(hw);
+       }
+
+       return 0;
+}
+
+/**
+ * i40e_get_rss_hash_opts - Get RSS hash Input Set for each flow type
+ * @adapter: board private structure
+ * @cmd: ethtool rxnfc command
+ *
+ * Returns Success if the flow is supported, else Invalid Input.
+ **/
+static int i40evf_get_rss_hash_opts(struct i40evf_adapter *adapter,
+                                   struct ethtool_rxnfc *cmd)
+{
+       struct i40e_hw *hw = &adapter->hw;
+       u64 hena = (u64)rd32(hw, I40E_VFQF_HENA(0)) |
+                  ((u64)rd32(hw, I40E_VFQF_HENA(1)) << 32);
+
+       /* We always hash on IP src and dest addresses */
+       cmd->data = RXH_IP_SRC | RXH_IP_DST;
 
-       switch (ec->rx_coalesce_usecs) {
-       case 0:
-               vsi->rx_itr_setting = 0;
+       switch (cmd->flow_type) {
+       case TCP_V4_FLOW:
+               if (hena & ((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV4_TCP))
+                       cmd->data |= RXH_L4_B_0_1 | RXH_L4_B_2_3;
                break;
-       case 1:
-               vsi->rx_itr_setting = (I40E_ITR_DYNAMIC
-                                      | ITR_REG_TO_USEC(I40E_ITR_RX_DEF));
+       case UDP_V4_FLOW:
+               if (hena & ((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV4_UDP))
+                       cmd->data |= RXH_L4_B_0_1 | RXH_L4_B_2_3;
                break;
-       default:
-               if ((ec->rx_coalesce_usecs < (I40E_MIN_ITR << 1)) ||
-                   (ec->rx_coalesce_usecs > (I40E_MAX_ITR << 1)))
-                       return -EINVAL;
-               vsi->rx_itr_setting = ec->rx_coalesce_usecs;
+
+       case SCTP_V4_FLOW:
+       case AH_ESP_V4_FLOW:
+       case AH_V4_FLOW:
+       case ESP_V4_FLOW:
+       case IPV4_FLOW:
+               break;
+
+       case TCP_V6_FLOW:
+               if (hena & ((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV6_TCP))
+                       cmd->data |= RXH_L4_B_0_1 | RXH_L4_B_2_3;
+               break;
+       case UDP_V6_FLOW:
+               if (hena & ((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV6_UDP))
+                       cmd->data |= RXH_L4_B_0_1 | RXH_L4_B_2_3;
+               break;
+
+       case SCTP_V6_FLOW:
+       case AH_ESP_V6_FLOW:
+       case AH_V6_FLOW:
+       case ESP_V6_FLOW:
+       case IPV6_FLOW:
                break;
+       default:
+               cmd->data = 0;
+               return -EINVAL;
        }
 
-       switch (ec->tx_coalesce_usecs) {
-       case 0:
-               vsi->tx_itr_setting = 0;
+       return 0;
+}
+
+/**
+ * i40evf_get_rxnfc - command to get RX flow classification rules
+ * @netdev: network interface device structure
+ * @cmd: ethtool rxnfc command
+ *
+ * Returns Success if the command is supported.
+ **/
+static int i40evf_get_rxnfc(struct net_device *netdev,
+                           struct ethtool_rxnfc *cmd,
+                           u32 *rule_locs)
+{
+       struct i40evf_adapter *adapter = netdev_priv(netdev);
+       int ret = -EOPNOTSUPP;
+
+       switch (cmd->cmd) {
+       case ETHTOOL_GRXRINGS:
+               cmd->data = adapter->vsi_res->num_queue_pairs;
+               ret = 0;
                break;
-       case 1:
-               vsi->tx_itr_setting = (I40E_ITR_DYNAMIC
-                                      | ITR_REG_TO_USEC(I40E_ITR_TX_DEF));
+       case ETHTOOL_GRXFH:
+               ret = i40evf_get_rss_hash_opts(adapter, cmd);
                break;
        default:
-               if ((ec->tx_coalesce_usecs < (I40E_MIN_ITR << 1)) ||
-                   (ec->tx_coalesce_usecs > (I40E_MAX_ITR << 1)))
+               break;
+       }
+
+       return ret;
+}
+
+/**
+ * i40evf_set_rss_hash_opt - Enable/Disable flow types for RSS hash
+ * @adapter: board private structure
+ * @cmd: ethtool rxnfc command
+ *
+ * Returns Success if the flow input set is supported.
+ **/
+static int i40evf_set_rss_hash_opt(struct i40evf_adapter *adapter,
+                                  struct ethtool_rxnfc *nfc)
+{
+       struct i40e_hw *hw = &adapter->hw;
+
+       u64 hena = (u64)rd32(hw, I40E_VFQF_HENA(0)) |
+                  ((u64)rd32(hw, I40E_VFQF_HENA(1)) << 32);
+
+       /* RSS does not support anything other than hashing
+        * to queues on src and dst IPs and ports
+        */
+       if (nfc->data & ~(RXH_IP_SRC | RXH_IP_DST |
+                         RXH_L4_B_0_1 | RXH_L4_B_2_3))
+               return -EINVAL;
+
+       /* We need at least the IP SRC and DEST fields for hashing */
+       if (!(nfc->data & RXH_IP_SRC) ||
+           !(nfc->data & RXH_IP_DST))
+               return -EINVAL;
+
+       switch (nfc->flow_type) {
+       case TCP_V4_FLOW:
+               switch (nfc->data & (RXH_L4_B_0_1 | RXH_L4_B_2_3)) {
+               case 0:
+                       hena &= ~((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV4_TCP);
+                       break;
+               case (RXH_L4_B_0_1 | RXH_L4_B_2_3):
+                       hena |= ((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV4_TCP);
+                       break;
+               default:
                        return -EINVAL;
-               vsi->tx_itr_setting = ec->tx_coalesce_usecs;
+               }
+               break;
+       case TCP_V6_FLOW:
+               switch (nfc->data & (RXH_L4_B_0_1 | RXH_L4_B_2_3)) {
+               case 0:
+                       hena &= ~((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV6_TCP);
+                       break;
+               case (RXH_L4_B_0_1 | RXH_L4_B_2_3):
+                       hena |= ((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV6_TCP);
+                       break;
+               default:
+                       return -EINVAL;
+               }
                break;
+       case UDP_V4_FLOW:
+               switch (nfc->data & (RXH_L4_B_0_1 | RXH_L4_B_2_3)) {
+               case 0:
+                       hena &= ~(((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV4_UDP) |
+                                 ((u64)1 << I40E_FILTER_PCTYPE_FRAG_IPV4));
+                       break;
+               case (RXH_L4_B_0_1 | RXH_L4_B_2_3):
+                       hena |= (((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV4_UDP) |
+                                ((u64)1 << I40E_FILTER_PCTYPE_FRAG_IPV4));
+                       break;
+               default:
+                       return -EINVAL;
+               }
+               break;
+       case UDP_V6_FLOW:
+               switch (nfc->data & (RXH_L4_B_0_1 | RXH_L4_B_2_3)) {
+               case 0:
+                       hena &= ~(((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV6_UDP) |
+                                 ((u64)1 << I40E_FILTER_PCTYPE_FRAG_IPV6));
+                       break;
+               case (RXH_L4_B_0_1 | RXH_L4_B_2_3):
+                       hena |= (((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV6_UDP) |
+                                ((u64)1 << I40E_FILTER_PCTYPE_FRAG_IPV6));
+                       break;
+               default:
+                       return -EINVAL;
+               }
+               break;
+       case AH_ESP_V4_FLOW:
+       case AH_V4_FLOW:
+       case ESP_V4_FLOW:
+       case SCTP_V4_FLOW:
+               if ((nfc->data & RXH_L4_B_0_1) ||
+                   (nfc->data & RXH_L4_B_2_3))
+                       return -EINVAL;
+               hena |= ((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV4_OTHER);
+               break;
+       case AH_ESP_V6_FLOW:
+       case AH_V6_FLOW:
+       case ESP_V6_FLOW:
+       case SCTP_V6_FLOW:
+               if ((nfc->data & RXH_L4_B_0_1) ||
+                   (nfc->data & RXH_L4_B_2_3))
+                       return -EINVAL;
+               hena |= ((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV6_OTHER);
+               break;
+       case IPV4_FLOW:
+               hena |= ((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV4_OTHER) |
+                       ((u64)1 << I40E_FILTER_PCTYPE_FRAG_IPV4);
+               break;
+       case IPV6_FLOW:
+               hena |= ((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV6_OTHER) |
+                       ((u64)1 << I40E_FILTER_PCTYPE_FRAG_IPV6);
+               break;
+       default:
+               return -EINVAL;
        }
 
-       for (i = 0; i < adapter->num_msix_vectors - NONQ_VECS; i++) {
-               q_vector = adapter->q_vector[i];
-               q_vector->rx.itr = ITR_TO_REG(vsi->rx_itr_setting);
-               wr32(hw, I40E_VFINT_ITRN1(0, i), q_vector->rx.itr);
-               q_vector->tx.itr = ITR_TO_REG(vsi->tx_itr_setting);
-               wr32(hw, I40E_VFINT_ITRN1(1, i), q_vector->tx.itr);
-               i40e_flush(hw);
+       wr32(hw, I40E_VFQF_HENA(0), (u32)hena);
+       wr32(hw, I40E_VFQF_HENA(1), (u32)(hena >> 32));
+       i40e_flush(hw);
+
+       return 0;
+}
+
+/**
+ * i40evf_set_rxnfc - command to set RX flow classification rules
+ * @netdev: network interface device structure
+ * @cmd: ethtool rxnfc command
+ *
+ * Returns Success if the command is supported.
+ **/
+static int i40evf_set_rxnfc(struct net_device *netdev,
+                           struct ethtool_rxnfc *cmd)
+{
+       struct i40evf_adapter *adapter = netdev_priv(netdev);
+       int ret = -EOPNOTSUPP;
+
+       switch (cmd->cmd) {
+       case ETHTOOL_SRXFH:
+               ret = i40evf_set_rss_hash_opt(adapter, cmd);
+               break;
+       default:
+               break;
+       }
+
+       return ret;
+}
+
+/**
+ * i40evf_get_channels: get the number of channels supported by the device
+ * @netdev: network interface device structure
+ * @ch: channel information structure
+ *
+ * For the purposes of our device, we only use combined channels, i.e. a tx/rx
+ * queue pair. Report one extra channel to match our "other" MSI-X vector.
+ **/
+static void i40evf_get_channels(struct net_device *netdev,
+                               struct ethtool_channels *ch)
+{
+       struct i40evf_adapter *adapter = netdev_priv(netdev);
+
+       /* Report maximum channels */
+       ch->max_combined = adapter->vsi_res->num_queue_pairs;
+
+       ch->max_other = NONQ_VECS;
+       ch->other_count = NONQ_VECS;
+
+       ch->combined_count = adapter->vsi_res->num_queue_pairs;
+}
+
+/**
+ * i40evf_get_rxfh_indir_size - get the rx flow hash indirection table size
+ * @netdev: network interface device structure
+ *
+ * Returns the table size.
+ **/
+static u32 i40evf_get_rxfh_indir_size(struct net_device *netdev)
+{
+       return (I40E_VFQF_HLUT_MAX_INDEX + 1) * 4;
+}
+
+/**
+ * i40evf_get_rxfh_indir - get the rx flow hash indirection table
+ * @netdev: network interface device structure
+ * @indir: indirection table
+ *
+ * Reads the indirection table directly from the hardware. Always returns 0.
+ **/
+static int i40evf_get_rxfh_indir(struct net_device *netdev, u32 *indir)
+{
+       struct i40evf_adapter *adapter = netdev_priv(netdev);
+       struct i40e_hw *hw = &adapter->hw;
+       u32 hlut_val;
+       int i, j;
+
+       for (i = 0, j = 0; i < I40E_VFQF_HLUT_MAX_INDEX; i++) {
+               hlut_val = rd32(hw, I40E_VFQF_HLUT(i));
+               indir[j++] = hlut_val & 0xff;
+               indir[j++] = (hlut_val >> 8) & 0xff;
+               indir[j++] = (hlut_val >> 16) & 0xff;
+               indir[j++] = (hlut_val >> 24) & 0xff;
+       }
+       return 0;
+}
+
+/**
+ * i40evf_set_rxfh_indir - set the rx flow hash indirection table
+ * @netdev: network interface device structure
+ * @indir: indirection table
+ *
+ * Returns -EINVAL if the table specifies an inavlid queue id, otherwise
+ * returns 0 after programming the table.
+ **/
+static int i40evf_set_rxfh_indir(struct net_device *netdev, const u32 *indir)
+{
+       struct i40evf_adapter *adapter = netdev_priv(netdev);
+       struct i40e_hw *hw = &adapter->hw;
+       u32 hlut_val;
+       int i, j;
+
+       for (i = 0, j = 0; i < I40E_VFQF_HLUT_MAX_INDEX + 1; i++) {
+               hlut_val = indir[j++];
+               hlut_val |= indir[j++] << 8;
+               hlut_val |= indir[j++] << 16;
+               hlut_val |= indir[j++] << 24;
+               wr32(hw, I40E_VFQF_HLUT(i), hlut_val);
        }
 
        return 0;
 }
 
-static struct ethtool_ops i40evf_ethtool_ops = {
+static const struct ethtool_ops i40evf_ethtool_ops = {
        .get_settings           = i40evf_get_settings,
        .get_drvinfo            = i40evf_get_drvinfo,
        .get_link               = ethtool_op_get_link,
@@ -378,6 +686,12 @@ static struct ethtool_ops i40evf_ethtool_ops = {
        .set_msglevel           = i40evf_set_msglevel,
        .get_coalesce           = i40evf_get_coalesce,
        .set_coalesce           = i40evf_set_coalesce,
+       .get_rxnfc              = i40evf_get_rxnfc,
+       .set_rxnfc              = i40evf_set_rxnfc,
+       .get_rxfh_indir_size    = i40evf_get_rxfh_indir_size,
+       .get_rxfh_indir         = i40evf_get_rxfh_indir,
+       .set_rxfh_indir         = i40evf_set_rxfh_indir,
+       .get_channels           = i40evf_get_channels,
 };
 
 /**
@@ -389,5 +703,5 @@ static struct ethtool_ops i40evf_ethtool_ops = {
  **/
 void i40evf_set_ethtool_ops(struct net_device *netdev)
 {
-       SET_ETHTOOL_OPS(netdev, &i40evf_ethtool_ops);
+       netdev->ethtool_ops = &i40evf_ethtool_ops;
 }
index 2797548fde0dd918051a8f033472749e86d124c9..6f6bd3f018011a5bda9d51800e8008986bb4573e 100644 (file)
@@ -12,6 +12,9 @@
  * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
  * more details.
  *
+ * You should have received a copy of the GNU General Public License along
+ * with this program.  If not, see <http://www.gnu.org/licenses/>.
+ *
  * The full GNU General Public License is included in this distribution in
  * the file called "COPYING".
  *
 #include "i40e_prototype.h"
 static int i40evf_setup_all_tx_resources(struct i40evf_adapter *adapter);
 static int i40evf_setup_all_rx_resources(struct i40evf_adapter *adapter);
+static void i40evf_free_all_tx_resources(struct i40evf_adapter *adapter);
+static void i40evf_free_all_rx_resources(struct i40evf_adapter *adapter);
 static int i40evf_close(struct net_device *netdev);
 
 char i40evf_driver_name[] = "i40evf";
 static const char i40evf_driver_string[] =
        "Intel(R) XL710 X710 Virtual Function Network Driver";
 
-#define DRV_VERSION "0.9.16"
+#define DRV_VERSION "0.9.23"
 const char i40evf_driver_version[] = DRV_VERSION;
 static const char i40evf_copyright[] =
        "Copyright (c) 2013 - 2014 Intel Corporation.";
@@ -1027,30 +1032,21 @@ i40evf_acquire_msix_vectors(struct i40evf_adapter *adapter, int vectors)
         * Right now, we simply care about how many we'll get; we'll
         * set them up later while requesting irq's.
         */
-       while (vectors >= vector_threshold) {
-               err = pci_enable_msix(adapter->pdev, adapter->msix_entries,
-                                     vectors);
-               if (!err) /* Success in acquiring all requested vectors. */
-                       break;
-               else if (err < 0)
-                       vectors = 0; /* Nasty failure, quit now */
-               else /* err == number of vectors we should try again with */
-                       vectors = err;
-       }
-
-       if (vectors < vector_threshold) {
+       err = pci_enable_msix_range(adapter->pdev, adapter->msix_entries,
+                                   vector_threshold, vectors);
+       if (err < 0) {
                dev_err(&adapter->pdev->dev, "Unable to allocate MSI-X interrupts.\n");
                kfree(adapter->msix_entries);
                adapter->msix_entries = NULL;
-               err = -EIO;
-       } else {
-               /* Adjust for only the vectors we'll use, which is minimum
-                * of max_msix_q_vectors + NONQ_VECS, or the number of
-                * vectors we were allocated.
-                */
-               adapter->num_msix_vectors = vectors;
+               return err;
        }
-       return err;
+
+       /* Adjust for only the vectors we'll use, which is minimum
+        * of max_msix_q_vectors + NONQ_VECS, or the number of
+        * vectors we were allocated.
+        */
+       adapter->num_msix_vectors = err;
+       return 0;
 }
 
 /**
@@ -1309,7 +1305,6 @@ static void i40evf_watchdog_task(struct work_struct *work)
                goto restart_watchdog;
 
        if (adapter->flags & I40EVF_FLAG_PF_COMMS_FAILED) {
-               dev_info(&adapter->pdev->dev, "Checking for redemption\n");
                if ((rd32(hw, I40E_VFGEN_RSTAT) & 0x3) == I40E_VFR_VFACTIVE) {
                        /* A chance for redemption! */
                        dev_err(&adapter->pdev->dev, "Hardware came out of reset. Attempting reinit.\n");
@@ -1534,9 +1529,13 @@ static void i40evf_reset_task(struct work_struct *work)
                        rstat_val);
                adapter->flags |= I40EVF_FLAG_PF_COMMS_FAILED;
 
-               if (netif_running(adapter->netdev))
-                       i40evf_close(adapter->netdev);
-
+               if (netif_running(adapter->netdev)) {
+                       set_bit(__I40E_DOWN, &adapter->vsi.state);
+                       i40evf_down(adapter);
+                       i40evf_free_traffic_irqs(adapter);
+                       i40evf_free_all_tx_resources(adapter);
+                       i40evf_free_all_rx_resources(adapter);
+               }
                i40evf_free_misc_irq(adapter);
                i40evf_reset_interrupt_capability(adapter);
                i40evf_free_queues(adapter);
@@ -2114,8 +2113,10 @@ static void i40evf_init_task(struct work_struct *work)
        adapter->vsi.back = adapter;
        adapter->vsi.base_vector = 1;
        adapter->vsi.work_limit = I40E_DEFAULT_IRQ_WORK;
-       adapter->vsi.rx_itr_setting = I40E_ITR_DYNAMIC;
-       adapter->vsi.tx_itr_setting = I40E_ITR_DYNAMIC;
+       adapter->vsi.rx_itr_setting = (I40E_ITR_DYNAMIC |
+                                      ITR_REG_TO_USEC(I40E_ITR_RX_DEF));
+       adapter->vsi.tx_itr_setting = (I40E_ITR_DYNAMIC |
+                                      ITR_REG_TO_USEC(I40E_ITR_TX_DEF));
        adapter->vsi.netdev = adapter->netdev;
 
        if (!adapter->netdev_registered) {
index e294f012647d801417af4ca1f68d0629cbaf08cc..7f80bb4177225af7a026a59cd52e3ebe488b6a41 100644 (file)
@@ -12,6 +12,9 @@
  * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
  * more details.
  *
+ * You should have received a copy of the GNU General Public License along
+ * with this program.  If not, see <http://www.gnu.org/licenses/>.
+ *
  * The full GNU General Public License is included in this distribution in
  * the file called "COPYING".
  *
index fa36fe12e77502658cfe864780849d6f00e93c2e..2e36c670d8df48753d98f311d41b7c02eba2f66f 100644 (file)
@@ -1,28 +1,25 @@
-/*******************************************************************************
-
-  Intel(R) Gigabit Ethernet Linux driver
-  Copyright(c) 2007-2014 Intel Corporation.
-
-  This program is free software; you can redistribute it and/or modify it
-  under the terms and conditions of the GNU General Public License,
-  version 2, as published by the Free Software Foundation.
-
-  This program is distributed in the hope it will be useful, but WITHOUT
-  ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
-  FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
-  more details.
-
-  You should have received a copy of the GNU General Public License along with
-  this program; if not, see <http://www.gnu.org/licenses/>.
-
-  The full GNU General Public License is included in this distribution in
-  the file called "COPYING".
-
-  Contact Information:
-  e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
-  Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
-
-*******************************************************************************/
+/* Intel(R) Gigabit Ethernet Linux driver
+ * Copyright(c) 2007-2014 Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, see <http://www.gnu.org/licenses/>.
+ *
+ * The full GNU General Public License is included in this distribution in
+ * the file called "COPYING".
+ *
+ * Contact Information:
+ * e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
+ * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
+ */
 
 /* e1000_82575
  * e1000_82576
@@ -73,9 +70,8 @@ static s32  igb_validate_nvm_checksum_82580(struct e1000_hw *hw);
 static s32  igb_update_nvm_checksum_82580(struct e1000_hw *hw);
 static s32 igb_validate_nvm_checksum_i350(struct e1000_hw *hw);
 static s32 igb_update_nvm_checksum_i350(struct e1000_hw *hw);
-static const u16 e1000_82580_rxpbs_table[] =
-       { 36, 72, 144, 1, 2, 4, 8, 16,
-         35, 70, 140 };
+static const u16 e1000_82580_rxpbs_table[] = {
+       36, 72, 144, 1, 2, 4, 8, 16, 35, 70, 140 };
 
 /**
  *  igb_sgmii_uses_mdio_82575 - Determine if I2C pins are for external MDIO
@@ -526,7 +522,7 @@ out:
 static s32 igb_get_invariants_82575(struct e1000_hw *hw)
 {
        struct e1000_mac_info *mac = &hw->mac;
-       struct e1000_dev_spec_82575 * dev_spec = &hw->dev_spec._82575;
+       struct e1000_dev_spec_82575 *dev_spec = &hw->dev_spec._82575;
        s32 ret_val;
        u32 ctrl_ext = 0;
        u32 link_mode = 0;
@@ -1180,8 +1176,8 @@ static void igb_release_swfw_sync_82575(struct e1000_hw *hw, u16 mask)
 {
        u32 swfw_sync;
 
-       while (igb_get_hw_semaphore(hw) != 0);
-       /* Empty */
+       while (igb_get_hw_semaphore(hw) != 0)
+               ; /* Empty */
 
        swfw_sync = rd32(E1000_SW_FW_SYNC);
        swfw_sync &= ~mask;
@@ -1216,7 +1212,7 @@ static s32 igb_get_cfg_done_82575(struct e1000_hw *hw)
        while (timeout) {
                if (rd32(E1000_EEMNGCTL) & mask)
                        break;
-               msleep(1);
+               usleep_range(1000, 2000);
                timeout--;
        }
        if (!timeout)
@@ -1269,7 +1265,7 @@ static s32 igb_check_for_link_82575(struct e1000_hw *hw)
 
        if (hw->phy.media_type != e1000_media_type_copper) {
                ret_val = igb_get_pcs_speed_and_duplex_82575(hw, &speed,
-                                                            &duplex);
+                                                            &duplex);
                /* Use this flag to determine if link needs to be checked or
                 * not.  If  we have link clear the flag so that we do not
                 * continue to check for link.
@@ -1316,7 +1312,7 @@ void igb_power_up_serdes_link_82575(struct e1000_hw *hw)
 
        /* flush the write to verify completion */
        wrfl();
-       msleep(1);
+       usleep_range(1000, 2000);
 }
 
 /**
@@ -1411,7 +1407,7 @@ void igb_shutdown_serdes_link_82575(struct e1000_hw *hw)
 
                /* flush the write to verify completion */
                wrfl();
-               msleep(1);
+               usleep_range(1000, 2000);
        }
 }
 
@@ -1436,9 +1432,8 @@ static s32 igb_reset_hw_82575(struct e1000_hw *hw)
 
        /* set the completion timeout for interface */
        ret_val = igb_set_pcie_completion_timeout(hw);
-       if (ret_val) {
+       if (ret_val)
                hw_dbg("PCI-E Set completion timeout has failed.\n");
-       }
 
        hw_dbg("Masking off all interrupts\n");
        wr32(E1000_IMC, 0xffffffff);
@@ -1447,7 +1442,7 @@ static s32 igb_reset_hw_82575(struct e1000_hw *hw)
        wr32(E1000_TCTL, E1000_TCTL_PSP);
        wrfl();
 
-       msleep(10);
+       usleep_range(10000, 20000);
 
        ctrl = rd32(E1000_CTRL);
 
@@ -1676,7 +1671,7 @@ static s32 igb_setup_serdes_link_82575(struct e1000_hw *hw)
                    hw->mac.type == e1000_82576) {
                        ret_val = hw->nvm.ops.read(hw, NVM_COMPAT, 1, &data);
                        if (ret_val) {
-                               printk(KERN_DEBUG "NVM Read Error\n\n");
+                               hw_dbg("NVM Read Error\n\n");
                                return ret_val;
                        }
 
@@ -1689,7 +1684,7 @@ static s32 igb_setup_serdes_link_82575(struct e1000_hw *hw)
                 * link either autoneg or be forced to 1000/Full
                 */
                ctrl_reg |= E1000_CTRL_SPD_1000 | E1000_CTRL_FRCSPD |
-                           E1000_CTRL_FD | E1000_CTRL_FRCDPX;
+                               E1000_CTRL_FD | E1000_CTRL_FRCDPX;
 
                /* set speed of 1000/Full if speed/duplex is forced */
                reg |= E1000_PCS_LCTL_FSV_1000 | E1000_PCS_LCTL_FDV_FULL;
@@ -1925,7 +1920,7 @@ void igb_rx_fifo_flush_82575(struct e1000_hw *hw)
        }
        /* Poll all queues to verify they have shut down */
        for (ms_wait = 0; ms_wait < 10; ms_wait++) {
-               msleep(1);
+               usleep_range(1000, 2000);
                rx_enabled = 0;
                for (i = 0; i < 4; i++)
                        rx_enabled |= rd32(E1000_RXDCTL(i));
@@ -1953,7 +1948,7 @@ void igb_rx_fifo_flush_82575(struct e1000_hw *hw)
        wr32(E1000_RCTL, temp_rctl);
        wr32(E1000_RCTL, temp_rctl | E1000_RCTL_EN);
        wrfl();
-       msleep(2);
+       usleep_range(2000, 3000);
 
        /* Enable RX queues that were previously enabled and restore our
         * previous state
@@ -2005,14 +2000,14 @@ static s32 igb_set_pcie_completion_timeout(struct e1000_hw *hw)
         * 16ms to 55ms
         */
        ret_val = igb_read_pcie_cap_reg(hw, PCIE_DEVICE_CONTROL2,
-                                       &pcie_devctl2);
+                                       &pcie_devctl2);
        if (ret_val)
                goto out;
 
        pcie_devctl2 |= PCIE_DEVICE_CONTROL2_16ms;
 
        ret_val = igb_write_pcie_cap_reg(hw, PCIE_DEVICE_CONTROL2,
-                                        &pcie_devctl2);
+                                        &pcie_devctl2);
 out:
        /* disable completion timeout resend */
        gcr &= ~E1000_GCR_CMPL_TMOUT_RESEND;
@@ -2241,7 +2236,7 @@ static s32 igb_reset_hw_82580(struct e1000_hw *hw)
        wr32(E1000_TCTL, E1000_TCTL_PSP);
        wrfl();
 
-       msleep(10);
+       usleep_range(10000, 11000);
 
        /* Determine whether or not a global dev reset is requested */
        if (global_device_reset &&
@@ -2259,7 +2254,7 @@ static s32 igb_reset_hw_82580(struct e1000_hw *hw)
 
        /* Add delay to insure DEV_RST has time to complete */
        if (global_device_reset)
-               msleep(5);
+               usleep_range(5000, 6000);
 
        ret_val = igb_get_auto_rd_done(hw);
        if (ret_val) {
@@ -2436,8 +2431,7 @@ static s32 igb_update_nvm_checksum_82580(struct e1000_hw *hw)
 
        ret_val = hw->nvm.ops.read(hw, NVM_COMPATIBILITY_REG_3, 1, &nvm_data);
        if (ret_val) {
-               hw_dbg("NVM Read Error while updating checksum"
-                       " compatibility bit.\n");
+               hw_dbg("NVM Read Error while updating checksum compatibility bit.\n");
                goto out;
        }
 
@@ -2447,8 +2441,7 @@ static s32 igb_update_nvm_checksum_82580(struct e1000_hw *hw)
                ret_val = hw->nvm.ops.write(hw, NVM_COMPATIBILITY_REG_3, 1,
                                        &nvm_data);
                if (ret_val) {
-                       hw_dbg("NVM Write Error while updating checksum"
-                               " compatibility bit.\n");
+                       hw_dbg("NVM Write Error while updating checksum compatibility bit.\n");
                        goto out;
                }
        }
index 09d78be72416563beeda5e3cb72a7338b2aa7060..b407c55738fadf0a333ee7947fedd2c0c31638a1 100644 (file)
@@ -1,28 +1,25 @@
-/*******************************************************************************
-
-  Intel(R) Gigabit Ethernet Linux driver
-  Copyright(c) 2007-2014 Intel Corporation.
-
-  This program is free software; you can redistribute it and/or modify it
-  under the terms and conditions of the GNU General Public License,
-  version 2, as published by the Free Software Foundation.
-
-  This program is distributed in the hope it will be useful, but WITHOUT
-  ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
-  FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
-  more details.
-
-  You should have received a copy of the GNU General Public License along with
-  this program; if not, see <http://www.gnu.org/licenses/>.
-
-  The full GNU General Public License is included in this distribution in
-  the file called "COPYING".
-
-  Contact Information:
-  e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
-  Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
-
-*******************************************************************************/
+/* Intel(R) Gigabit Ethernet Linux driver
+ * Copyright(c) 2007-2014 Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, see <http://www.gnu.org/licenses/>.
+ *
+ * The full GNU General Public License is included in this distribution in
+ * the file called "COPYING".
+ *
+ * Contact Information:
+ * e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
+ * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
+ */
 
 #ifndef _E1000_82575_H_
 #define _E1000_82575_H_
@@ -37,9 +34,9 @@ s32 igb_write_i2c_byte(struct e1000_hw *hw, u8 byte_offset, u8 dev_addr,
                       u8 data);
 
 #define ID_LED_DEFAULT_82575_SERDES ((ID_LED_DEF1_DEF2 << 12) | \
-                                     (ID_LED_DEF1_DEF2 <<  8) | \
-                                     (ID_LED_DEF1_DEF2 <<  4) | \
-                                     (ID_LED_OFF1_ON2))
+                                    (ID_LED_DEF1_DEF2 <<  8) | \
+                                    (ID_LED_DEF1_DEF2 <<  4) | \
+                                    (ID_LED_OFF1_ON2))
 
 #define E1000_RAR_ENTRIES_82575        16
 #define E1000_RAR_ENTRIES_82576        24
@@ -67,16 +64,16 @@ s32 igb_write_i2c_byte(struct e1000_hw *hw, u8 byte_offset, u8 dev_addr,
 #define E1000_MRQC_RSS_FIELD_IPV6_UDP_EX    0x01000000
 
 #define E1000_EICR_TX_QUEUE ( \
-    E1000_EICR_TX_QUEUE0 |    \
-    E1000_EICR_TX_QUEUE1 |    \
-    E1000_EICR_TX_QUEUE2 |    \
-    E1000_EICR_TX_QUEUE3)
+       E1000_EICR_TX_QUEUE0 |    \
+       E1000_EICR_TX_QUEUE1 |    \
+       E1000_EICR_TX_QUEUE2 |    \
+       E1000_EICR_TX_QUEUE3)
 
 #define E1000_EICR_RX_QUEUE ( \
-    E1000_EICR_RX_QUEUE0 |    \
-    E1000_EICR_RX_QUEUE1 |    \
-    E1000_EICR_RX_QUEUE2 |    \
-    E1000_EICR_RX_QUEUE3)
+       E1000_EICR_RX_QUEUE0 |    \
+       E1000_EICR_RX_QUEUE1 |    \
+       E1000_EICR_RX_QUEUE2 |    \
+       E1000_EICR_RX_QUEUE3)
 
 /* Immediate Interrupt Rx (A.K.A. Low Latency Interrupt) */
 #define E1000_IMIREXT_SIZE_BP     0x00001000  /* Packet size bypass */
@@ -92,8 +89,7 @@ union e1000_adv_rx_desc {
                struct {
                        struct {
                                __le16 pkt_info;   /* RSS type, Packet type */
-                               __le16 hdr_info;   /* Split Header,
-                                                   * header buffer length */
+                               __le16 hdr_info;   /* Split Head, buf len */
                        } lo_dword;
                        union {
                                __le32 rss;          /* RSS Hash */
index b05bf925ac721982d8ded6d3aa647d64236890f4..f85be6695e44857b7e149184405a1f06a9d2fab3 100644 (file)
@@ -1,28 +1,25 @@
-/*******************************************************************************
-
-  Intel(R) Gigabit Ethernet Linux driver
-  Copyright(c) 2007-2014 Intel Corporation.
-
-  This program is free software; you can redistribute it and/or modify it
-  under the terms and conditions of the GNU General Public License,
-  version 2, as published by the Free Software Foundation.
-
-  This program is distributed in the hope it will be useful, but WITHOUT
-  ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
-  FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
-  more details.
-
-  You should have received a copy of the GNU General Public License along with
-  this program; if not, see <http://www.gnu.org/licenses/>.
-
-  The full GNU General Public License is included in this distribution in
-  the file called "COPYING".
-
-  Contact Information:
-  e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
-  Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
-
-*******************************************************************************/
+/* Intel(R) Gigabit Ethernet Linux driver
+ * Copyright(c) 2007-2014 Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, see <http://www.gnu.org/licenses/>.
+ *
+ * The full GNU General Public License is included in this distribution in
+ * the file called "COPYING".
+ *
+ * Contact Information:
+ * e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
+ * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
+ */
 
 #ifndef _E1000_DEFINES_H_
 #define _E1000_DEFINES_H_
 
 /* Same mask, but for extended and packet split descriptors */
 #define E1000_RXDEXT_ERR_FRAME_ERR_MASK ( \
-    E1000_RXDEXT_STATERR_CE  |            \
-    E1000_RXDEXT_STATERR_SE  |            \
-    E1000_RXDEXT_STATERR_SEQ |            \
-    E1000_RXDEXT_STATERR_CXE |            \
-    E1000_RXDEXT_STATERR_RXE)
+       E1000_RXDEXT_STATERR_CE  |            \
+       E1000_RXDEXT_STATERR_SE  |            \
+       E1000_RXDEXT_STATERR_SEQ |            \
+       E1000_RXDEXT_STATERR_CXE |            \
+       E1000_RXDEXT_STATERR_RXE)
 
 #define E1000_MRQC_RSS_FIELD_IPV4_TCP          0x00010000
 #define E1000_MRQC_RSS_FIELD_IPV4              0x00020000
 #define E1000_TCTL_RTLC   0x01000000    /* Re-transmit on late collision */
 
 /* DMA Coalescing register fields */
-#define E1000_DMACR_DMACWT_MASK         0x00003FFF /* DMA Coalescing
-                                                       * Watchdog Timer */
-#define E1000_DMACR_DMACTHR_MASK        0x00FF0000 /* DMA Coalescing Receive
-                                                       * Threshold */
+#define E1000_DMACR_DMACWT_MASK         0x00003FFF /* DMA Coal Watchdog Timer */
+#define E1000_DMACR_DMACTHR_MASK        0x00FF0000 /* DMA Coal Rx Threshold */
 #define E1000_DMACR_DMACTHR_SHIFT       16
-#define E1000_DMACR_DMAC_LX_MASK        0x30000000 /* Lx when no PCIe
-                                                       * transactions */
+#define E1000_DMACR_DMAC_LX_MASK        0x30000000 /* Lx when no PCIe trans */
 #define E1000_DMACR_DMAC_LX_SHIFT       28
 #define E1000_DMACR_DMAC_EN             0x80000000 /* Enable DMA Coalescing */
 /* DMA Coalescing BMC-to-OS Watchdog Enable */
 #define E1000_DMACR_DC_BMC2OSW_EN      0x00008000
 
-#define E1000_DMCTXTH_DMCTTHR_MASK      0x00000FFF /* DMA Coalescing Transmit
-                                                       * Threshold */
+#define E1000_DMCTXTH_DMCTTHR_MASK      0x00000FFF /* DMA Coal Tx Threshold */
 
 #define E1000_DMCTLX_TTLX_MASK          0x00000FFF /* Time to LX request */
 
-#define E1000_DMCRTRH_UTRESH_MASK       0x0007FFFF /* Receive Traffic Rate
-                                                       * Threshold */
-#define E1000_DMCRTRH_LRPRCW            0x80000000 /* Rcv packet rate in
-                                                       * current window */
+#define E1000_DMCRTRH_UTRESH_MASK       0x0007FFFF /* Rx Traffic Rate Thresh */
+#define E1000_DMCRTRH_LRPRCW            0x80000000 /* Rx pkt rate curr window */
 
-#define E1000_DMCCNT_CCOUNT_MASK        0x01FFFFFF /* DMA Coal Rcv Traffic
-                                                       * Current Cnt */
+#define E1000_DMCCNT_CCOUNT_MASK        0x01FFFFFF /* DMA Coal Rx Current Cnt */
 
-#define E1000_FCRTC_RTH_COAL_MASK       0x0003FFF0 /* Flow ctrl Rcv Threshold
-                                                       * High val */
+#define E1000_FCRTC_RTH_COAL_MASK       0x0003FFF0 /* FC Rx Thresh High val */
 #define E1000_FCRTC_RTH_COAL_SHIFT      4
 #define E1000_PCIEMISC_LX_DECISION      0x00000080 /* Lx power decision */
 
  *   o LSC    = Link Status Change
  */
 #define IMS_ENABLE_MASK ( \
-    E1000_IMS_RXT0   |    \
-    E1000_IMS_TXDW   |    \
-    E1000_IMS_RXDMT0 |    \
-    E1000_IMS_RXSEQ  |    \
-    E1000_IMS_LSC    |    \
-    E1000_IMS_DOUTSYNC)
+       E1000_IMS_RXT0   |    \
+       E1000_IMS_TXDW   |    \
+       E1000_IMS_RXDMT0 |    \
+       E1000_IMS_RXSEQ  |    \
+       E1000_IMS_LSC    |    \
+       E1000_IMS_DOUTSYNC)
 
 /* Interrupt Mask Set */
 #define E1000_IMS_TXDW      E1000_ICR_TXDW      /* Transmit desc written back */
 #define E1000_VFTA_ENTRY_BIT_SHIFT_MASK      0x1F
 
 /* DMA Coalescing register fields */
-#define E1000_PCIEMISC_LX_DECISION      0x00000080 /* Lx power decision based
-                                                      on DMA coal */
+#define E1000_PCIEMISC_LX_DECISION      0x00000080 /* Lx power on DMA coal */
 
 /* Tx Rate-Scheduler Config fields */
 #define E1000_RTTBCNRC_RS_ENA          0x80000000
index 10741d170f2ddad46b2fad14d362998bdc19d639..89925e4058498ea1c1ffda3195576d8abcda611e 100644 (file)
@@ -1,28 +1,25 @@
-/*******************************************************************************
-
-  Intel(R) Gigabit Ethernet Linux driver
-  Copyright(c) 2007-2014 Intel Corporation.
-
-  This program is free software; you can redistribute it and/or modify it
-  under the terms and conditions of the GNU General Public License,
-  version 2, as published by the Free Software Foundation.
-
-  This program is distributed in the hope it will be useful, but WITHOUT
-  ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
-  FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
-  more details.
-
-  You should have received a copy of the GNU General Public License along with
-  this program; if not, see <http://www.gnu.org/licenses/>.
-
-  The full GNU General Public License is included in this distribution in
-  the file called "COPYING".
-
-  Contact Information:
-  e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
-  Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
-
-*******************************************************************************/
+/* Intel(R) Gigabit Ethernet Linux driver
+ * Copyright(c) 2007-2014 Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, see <http://www.gnu.org/licenses/>.
+ *
+ * The full GNU General Public License is included in this distribution in
+ * the file called "COPYING".
+ *
+ * Contact Information:
+ * e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
+ * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
+ */
 
 #ifndef _E1000_HW_H_
 #define _E1000_HW_H_
@@ -320,15 +316,15 @@ struct e1000_host_mng_command_info {
 #include "e1000_mbx.h"
 
 struct e1000_mac_operations {
-       s32  (*check_for_link)(struct e1000_hw *);
-       s32  (*reset_hw)(struct e1000_hw *);
-       s32  (*init_hw)(struct e1000_hw *);
+       s32 (*check_for_link)(struct e1000_hw *);
+       s32 (*reset_hw)(struct e1000_hw *);
+       s32 (*init_hw)(struct e1000_hw *);
        bool (*check_mng_mode)(struct e1000_hw *);
-       s32  (*setup_physical_interface)(struct e1000_hw *);
+       s32 (*setup_physical_interface)(struct e1000_hw *);
        void (*rar_set)(struct e1000_hw *, u8 *, u32);
-       s32  (*read_mac_addr)(struct e1000_hw *);
-       s32  (*get_speed_and_duplex)(struct e1000_hw *, u16 *, u16 *);
-       s32  (*acquire_swfw_sync)(struct e1000_hw *, u16);
+       s32 (*read_mac_addr)(struct e1000_hw *);
+       s32 (*get_speed_and_duplex)(struct e1000_hw *, u16 *, u16 *);
+       s32 (*acquire_swfw_sync)(struct e1000_hw *, u16);
        void (*release_swfw_sync)(struct e1000_hw *, u16);
 #ifdef CONFIG_IGB_HWMON
        s32 (*get_thermal_sensor_data)(struct e1000_hw *);
@@ -338,31 +334,31 @@ struct e1000_mac_operations {
 };
 
 struct e1000_phy_operations {
-       s32  (*acquire)(struct e1000_hw *);
-       s32  (*check_polarity)(struct e1000_hw *);
-       s32  (*check_reset_block)(struct e1000_hw *);
-       s32  (*force_speed_duplex)(struct e1000_hw *);
-       s32  (*get_cfg_done)(struct e1000_hw *hw);
-       s32  (*get_cable_length)(struct e1000_hw *);
-       s32  (*get_phy_info)(struct e1000_hw *);
-       s32  (*read_reg)(struct e1000_hw *, u32, u16 *);
+       s32 (*acquire)(struct e1000_hw *);
+       s32 (*check_polarity)(struct e1000_hw *);
+       s32 (*check_reset_block)(struct e1000_hw *);
+       s32 (*force_speed_duplex)(struct e1000_hw *);
+       s32 (*get_cfg_done)(struct e1000_hw *hw);
+       s32 (*get_cable_length)(struct e1000_hw *);
+       s32 (*get_phy_info)(struct e1000_hw *);
+       s32 (*read_reg)(struct e1000_hw *, u32, u16 *);
        void (*release)(struct e1000_hw *);
-       s32  (*reset)(struct e1000_hw *);
-       s32  (*set_d0_lplu_state)(struct e1000_hw *, bool);
-       s32  (*set_d3_lplu_state)(struct e1000_hw *, bool);
-       s32  (*write_reg)(struct e1000_hw *, u32, u16);
+       s32 (*reset)(struct e1000_hw *);
+       s32 (*set_d0_lplu_state)(struct e1000_hw *, bool);
+       s32 (*set_d3_lplu_state)(struct e1000_hw *, bool);
+       s32 (*write_reg)(struct e1000_hw *, u32, u16);
        s32 (*read_i2c_byte)(struct e1000_hw *, u8, u8, u8 *);
        s32 (*write_i2c_byte)(struct e1000_hw *, u8, u8, u8);
 };
 
 struct e1000_nvm_operations {
-       s32  (*acquire)(struct e1000_hw *);
-       s32  (*read)(struct e1000_hw *, u16, u16, u16 *);
+       s32 (*acquire)(struct e1000_hw *);
+       s32 (*read)(struct e1000_hw *, u16, u16, u16 *);
        void (*release)(struct e1000_hw *);
-       s32  (*write)(struct e1000_hw *, u16, u16, u16 *);
-       s32  (*update)(struct e1000_hw *);
-       s32  (*validate)(struct e1000_hw *);
-       s32  (*valid_led_default)(struct e1000_hw *, u16 *);
+       s32 (*write)(struct e1000_hw *, u16, u16, u16 *);
+       s32 (*update)(struct e1000_hw *);
+       s32 (*validate)(struct e1000_hw *);
+       s32 (*valid_led_default)(struct e1000_hw *, u16 *);
 };
 
 #define E1000_MAX_SENSORS              3
index db963397cc27f42fd15829ec6dc540e19af5f562..2231598fb42d12833c8c4a134e7e888080e02df4 100644 (file)
@@ -1,28 +1,25 @@
-/*******************************************************************************
-
-  Intel(R) Gigabit Ethernet Linux driver
-  Copyright(c) 2007-2014 Intel Corporation.
-
-  This program is free software; you can redistribute it and/or modify it
-  under the terms and conditions of the GNU General Public License,
-  version 2, as published by the Free Software Foundation.
-
-  This program is distributed in the hope it will be useful, but WITHOUT
-  ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
-  FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
-  more details.
-
-  You should have received a copy of the GNU General Public License along with
-  this program; if not, see <http://www.gnu.org/licenses/>.
-
-  The full GNU General Public License is included in this distribution in
-  the file called "COPYING".
-
-  Contact Information:
-  e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
-  Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
-
-******************************************************************************/
+/* Intel(R) Gigabit Ethernet Linux driver
+ * Copyright(c) 2007-2014 Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, see <http://www.gnu.org/licenses/>.
+ *
+ * The full GNU General Public License is included in this distribution in
+ * the file called "COPYING".
+ *
+ * Contact Information:
+ * e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
+ * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
+ */
 
 /* e1000_i210
  * e1000_i211
@@ -365,7 +362,7 @@ static s32 igb_read_invm_word_i210(struct e1000_hw *hw, u8 address, u16 *data)
                        word_address = INVM_DWORD_TO_WORD_ADDRESS(invm_dword);
                        if (word_address == address) {
                                *data = INVM_DWORD_TO_WORD_DATA(invm_dword);
-                               hw_dbg("Read INVM Word 0x%02x = %x",
+                               hw_dbg("Read INVM Word 0x%02x = %x\n",
                                          address, *data);
                                status = E1000_SUCCESS;
                                break;
@@ -435,6 +432,7 @@ static s32 igb_read_invm_i210(struct e1000_hw *hw, u16 offset,
                        *data = ID_LED_RESERVED_FFFF;
                        ret_val = E1000_SUCCESS;
                }
+               break;
        case NVM_SUB_DEV_ID:
                *data = hw->subsystem_device_id;
                break;
index 907fe99a9813130e45a3dddf0d5d48c6dfdc492d..9f34976687baedc7eb4d4844678cb2592c10e9d1 100644 (file)
@@ -1,28 +1,25 @@
-/*******************************************************************************
-
-  Intel(R) Gigabit Ethernet Linux driver
-  Copyright(c) 2007-2014 Intel Corporation.
-
-  This program is free software; you can redistribute it and/or modify it
-  under the terms and conditions of the GNU General Public License,
-  version 2, as published by the Free Software Foundation.
-
-  This program is distributed in the hope it will be useful, but WITHOUT
-  ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
-  FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
-  more details.
-
-  You should have received a copy of the GNU General Public License along with
-  this program; if not, see <http://www.gnu.org/licenses/>.
-
-  The full GNU General Public License is included in this distribution in
-  the file called "COPYING".
-
-  Contact Information:
-  e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
-  Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
-
-*******************************************************************************/
+/* Intel(R) Gigabit Ethernet Linux driver
+ * Copyright(c) 2007-2014 Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, see <http://www.gnu.org/licenses/>.
+ *
+ * The full GNU General Public License is included in this distribution in
+ * the file called "COPYING".
+ *
+ * Contact Information:
+ * e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
+ * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
+ */
 
 #ifndef _E1000_I210_H_
 #define _E1000_I210_H_
index 5910a932ea7c92cb67223a7c900f7c3b3e36a990..2a88595f956cf4e3089d20a986f0adb9db48681e 100644 (file)
@@ -1,28 +1,25 @@
-/*******************************************************************************
-
-  Intel(R) Gigabit Ethernet Linux driver
-  Copyright(c) 2007-2014 Intel Corporation.
-
-  This program is free software; you can redistribute it and/or modify it
-  under the terms and conditions of the GNU General Public License,
-  version 2, as published by the Free Software Foundation.
-
-  This program is distributed in the hope it will be useful, but WITHOUT
-  ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
-  FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
-  more details.
-
-  You should have received a copy of the GNU General Public License along with
-  this program; if not, see <http://www.gnu.org/licenses/>.
-
-  The full GNU General Public License is included in this distribution in
-  the file called "COPYING".
-
-  Contact Information:
-  e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
-  Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
-
-*******************************************************************************/
+/* Intel(R) Gigabit Ethernet Linux driver
+ * Copyright(c) 2007-2014 Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, see <http://www.gnu.org/licenses/>.
+ *
+ * The full GNU General Public License is included in this distribution in
+ * the file called "COPYING".
+ *
+ * Contact Information:
+ * e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
+ * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
+ */
 
 #include <linux/if_ether.h>
 #include <linux/delay.h>
@@ -442,7 +439,7 @@ static u32 igb_hash_mc_addr(struct e1000_hw *hw, u8 *mc_addr)
  *  The caller must have a packed mc_addr_list of multicast addresses.
  **/
 void igb_update_mc_addr_list(struct e1000_hw *hw,
-                             u8 *mc_addr_list, u32 mc_addr_count)
+                            u8 *mc_addr_list, u32 mc_addr_count)
 {
        u32 hash_value, hash_bit, hash_reg;
        int i;
@@ -866,8 +863,7 @@ s32 igb_config_fc_after_link_up(struct e1000_hw *hw)
                        goto out;
 
                if (!(mii_status_reg & MII_SR_AUTONEG_COMPLETE)) {
-                       hw_dbg("Copper PHY and Auto Neg "
-                                "has not completed.\n");
+                       hw_dbg("Copper PHY and Auto Neg has not completed.\n");
                        goto out;
                }
 
@@ -929,11 +925,10 @@ s32 igb_config_fc_after_link_up(struct e1000_hw *hw)
                         */
                        if (hw->fc.requested_mode == e1000_fc_full) {
                                hw->fc.current_mode = e1000_fc_full;
-                               hw_dbg("Flow Control = FULL.\r\n");
+                               hw_dbg("Flow Control = FULL.\n");
                        } else {
                                hw->fc.current_mode = e1000_fc_rx_pause;
-                               hw_dbg("Flow Control = "
-                                      "RX PAUSE frames only.\r\n");
+                               hw_dbg("Flow Control = RX PAUSE frames only.\n");
                        }
                }
                /* For receiving PAUSE frames ONLY.
@@ -948,7 +943,7 @@ s32 igb_config_fc_after_link_up(struct e1000_hw *hw)
                          (mii_nway_lp_ability_reg & NWAY_LPAR_PAUSE) &&
                          (mii_nway_lp_ability_reg & NWAY_LPAR_ASM_DIR)) {
                        hw->fc.current_mode = e1000_fc_tx_pause;
-                       hw_dbg("Flow Control = TX PAUSE frames only.\r\n");
+                       hw_dbg("Flow Control = TX PAUSE frames only.\n");
                }
                /* For transmitting PAUSE frames ONLY.
                 *
@@ -962,7 +957,7 @@ s32 igb_config_fc_after_link_up(struct e1000_hw *hw)
                         !(mii_nway_lp_ability_reg & NWAY_LPAR_PAUSE) &&
                         (mii_nway_lp_ability_reg & NWAY_LPAR_ASM_DIR)) {
                        hw->fc.current_mode = e1000_fc_rx_pause;
-                       hw_dbg("Flow Control = RX PAUSE frames only.\r\n");
+                       hw_dbg("Flow Control = RX PAUSE frames only.\n");
                }
                /* Per the IEEE spec, at this point flow control should be
                 * disabled.  However, we want to consider that we could
@@ -988,10 +983,10 @@ s32 igb_config_fc_after_link_up(struct e1000_hw *hw)
                         (hw->fc.requested_mode == e1000_fc_tx_pause) ||
                         (hw->fc.strict_ieee)) {
                        hw->fc.current_mode = e1000_fc_none;
-                       hw_dbg("Flow Control = NONE.\r\n");
+                       hw_dbg("Flow Control = NONE.\n");
                } else {
                        hw->fc.current_mode = e1000_fc_rx_pause;
-                       hw_dbg("Flow Control = RX PAUSE frames only.\r\n");
+                       hw_dbg("Flow Control = RX PAUSE frames only.\n");
                }
 
                /* Now we need to do one last check...  If we auto-
@@ -1266,7 +1261,7 @@ s32 igb_get_auto_rd_done(struct e1000_hw *hw)
        while (i < AUTO_READ_DONE_TIMEOUT) {
                if (rd32(E1000_EECD) & E1000_EECD_AUTO_RD)
                        break;
-               msleep(1);
+               usleep_range(1000, 2000);
                i++;
        }
 
@@ -1299,7 +1294,7 @@ static s32 igb_valid_led_default(struct e1000_hw *hw, u16 *data)
        }
 
        if (*data == ID_LED_RESERVED_0000 || *data == ID_LED_RESERVED_FFFF) {
-               switch(hw->phy.media_type) {
+               switch (hw->phy.media_type) {
                case e1000_media_type_internal_serdes:
                        *data = ID_LED_DEFAULT_82575_SERDES;
                        break;
index 99299ba8ee3a2def53ab2e6fcc8c5c039aca0265..ea24961b0d705e557a6b9bd57772d984ab0927ae 100644 (file)
@@ -1,28 +1,25 @@
-/*******************************************************************************
-
-  Intel(R) Gigabit Ethernet Linux driver
-  Copyright(c) 2007-2014 Intel Corporation.
-
-  This program is free software; you can redistribute it and/or modify it
-  under the terms and conditions of the GNU General Public License,
-  version 2, as published by the Free Software Foundation.
-
-  This program is distributed in the hope it will be useful, but WITHOUT
-  ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
-  FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
-  more details.
-
-  You should have received a copy of the GNU General Public License along with
-  this program; if not, see <http://www.gnu.org/licenses/>.
-
-  The full GNU General Public License is included in this distribution in
-  the file called "COPYING".
-
-  Contact Information:
-  e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
-  Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
-
-*******************************************************************************/
+/* Intel(R) Gigabit Ethernet Linux driver
+ * Copyright(c) 2007-2014 Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, see <http://www.gnu.org/licenses/>.
+ *
+ * The full GNU General Public License is included in this distribution in
+ * the file called "COPYING".
+ *
+ * Contact Information:
+ * e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
+ * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
+ */
 
 #ifndef _E1000_MAC_H_
 #define _E1000_MAC_H_
index d5b121771c313716543b16f5a8464866afd4f8e2..162cc49345d09babbd7fab30ec0917215e1a9b64 100644 (file)
@@ -1,28 +1,25 @@
-/*******************************************************************************
-
-  Intel(R) Gigabit Ethernet Linux driver
-  Copyright(c) 2007-2014 Intel Corporation.
-
-  This program is free software; you can redistribute it and/or modify it
-  under the terms and conditions of the GNU General Public License,
-  version 2, as published by the Free Software Foundation.
-
-  This program is distributed in the hope it will be useful, but WITHOUT
-  ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
-  FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
-  more details.
-
-  You should have received a copy of the GNU General Public License along with
-  this program; if not, see <http://www.gnu.org/licenses/>.
-
-  The full GNU General Public License is included in this distribution in
-  the file called "COPYING".
-
-  Contact Information:
-  e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
-  Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
-
-*******************************************************************************/
+/* Intel(R) Gigabit Ethernet Linux driver
+ * Copyright(c) 2007-2014 Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, see <http://www.gnu.org/licenses/>.
+ *
+ * The full GNU General Public License is included in this distribution in
+ * the file called "COPYING".
+ *
+ * Contact Information:
+ * e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
+ * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
+ */
 
 #include "e1000_mbx.h"
 
index f52f5515e5a8a8aedcc1568f90fdf1b986b2947e..d20af6b2f581698098a972d557a0a93fe19d1d48 100644 (file)
@@ -1,28 +1,25 @@
-/*******************************************************************************
-
-  Intel(R) Gigabit Ethernet Linux driver
-  Copyright(c) 2007-2014 Intel Corporation.
-
-  This program is free software; you can redistribute it and/or modify it
-  under the terms and conditions of the GNU General Public License,
-  version 2, as published by the Free Software Foundation.
-
-  This program is distributed in the hope it will be useful, but WITHOUT
-  ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
-  FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
-  more details.
-
-  You should have received a copy of the GNU General Public License along with
-  this program; if not, see <http://www.gnu.org/licenses/>.
-
-  The full GNU General Public License is included in this distribution in
-  the file called "COPYING".
-
-  Contact Information:
-  e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
-  Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
-
-*******************************************************************************/
+/* Intel(R) Gigabit Ethernet Linux driver
+ * Copyright(c) 2007-2014 Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, see <http://www.gnu.org/licenses/>.
+ *
+ * The full GNU General Public License is included in this distribution in
+ * the file called "COPYING".
+ *
+ * Contact Information:
+ * e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
+ * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
+ */
 
 #ifndef _E1000_MBX_H_
 #define _E1000_MBX_H_
index 9abf82919c65535d7b3fd7f7d7200f80fffd1f0a..92bcdbe756b2027a57fba9e1903dce1e8e810cdb 100644 (file)
@@ -1,28 +1,24 @@
-/*******************************************************************************
-
-  Intel(R) Gigabit Ethernet Linux driver
-  Copyright(c) 2007-2014 Intel Corporation.
-
-  This program is free software; you can redistribute it and/or modify it
-  under the terms and conditions of the GNU General Public License,
-  version 2, as published by the Free Software Foundation.
-
-  This program is distributed in the hope it will be useful, but WITHOUT
-  ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
-  FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
-  more details.
-
-  You should have received a copy of the GNU General Public License along with
-  this program; if not, see <http://www.gnu.org/licenses/>.
-
-  The full GNU General Public License is included in this distribution in
-  the file called "COPYING".
-
-  Contact Information:
-  e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
-  Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
-
-*******************************************************************************/
+/* Intel(R) Gigabit Ethernet Linux driver
+ * Copyright(c) 2007-2014 Intel Corporation.
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, see <http://www.gnu.org/licenses/>.
+ *
+ * The full GNU General Public License is included in this distribution in
+ * the file called "COPYING".
+ *
+ * Contact Information:
+ * e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
+ * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
+ */
 
 #include <linux/if_ether.h>
 #include <linux/delay.h>
@@ -480,6 +476,7 @@ s32 igb_write_nvm_spi(struct e1000_hw *hw, u16 offset, u16 words, u16 *data)
                /* Loop to allow for up to whole page write of eeprom */
                while (widx < words) {
                        u16 word_out = data[widx];
+
                        word_out = (word_out >> 8) | (word_out << 8);
                        igb_shift_out_eec_bits(hw, word_out, 16);
                        widx++;
index 5b101170b17e4bbc9af310c8aacd5e0b891344a0..febc9cdb739125174e143b0159feb0b529cb5ac6 100644 (file)
@@ -1,28 +1,25 @@
-/*******************************************************************************
-
-  Intel(R) Gigabit Ethernet Linux driver
-  Copyright(c) 2007-2014 Intel Corporation.
-
-  This program is free software; you can redistribute it and/or modify it
-  under the terms and conditions of the GNU General Public License,
-  version 2, as published by the Free Software Foundation.
-
-  This program is distributed in the hope it will be useful, but WITHOUT
-  ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
-  FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
-  more details.
-
-  You should have received a copy of the GNU General Public License along with
-  this program; if not, see <http://www.gnu.org/licenses/>.
-
-  The full GNU General Public License is included in this distribution in
-  the file called "COPYING".
-
-  Contact Information:
-  e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
-  Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
-
-*******************************************************************************/
+/* Intel(R) Gigabit Ethernet Linux driver
+ * Copyright(c) 2007-2014 Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, see <http://www.gnu.org/licenses/>.
+ *
+ * The full GNU General Public License is included in this distribution in
+ * the file called "COPYING".
+ *
+ * Contact Information:
+ * e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
+ * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
+ */
 
 #ifndef _E1000_NVM_H_
 #define _E1000_NVM_H_
@@ -32,7 +29,7 @@ void igb_release_nvm(struct e1000_hw *hw);
 s32  igb_read_mac_addr(struct e1000_hw *hw);
 s32  igb_read_part_num(struct e1000_hw *hw, u32 *part_num);
 s32  igb_read_part_string(struct e1000_hw *hw, u8 *part_num,
-                          u32 part_num_size);
+                         u32 part_num_size);
 s32  igb_read_nvm_eerd(struct e1000_hw *hw, u16 offset, u16 words, u16 *data);
 s32  igb_read_nvm_spi(struct e1000_hw *hw, u16 offset, u16 words, u16 *data);
 s32  igb_write_nvm_spi(struct e1000_hw *hw, u16 offset, u16 words, u16 *data);
index 4009bbab7407d21945e7c1af20df5deedba564a3..424f16c43759b1b787e44cb4d7c8194ec327c825 100644 (file)
@@ -1,28 +1,25 @@
-/*******************************************************************************
-
-  Intel(R) Gigabit Ethernet Linux driver
-  Copyright(c) 2007-2014 Intel Corporation.
-
-  This program is free software; you can redistribute it and/or modify it
-  under the terms and conditions of the GNU General Public License,
-  version 2, as published by the Free Software Foundation.
-
-  This program is distributed in the hope it will be useful, but WITHOUT
-  ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
-  FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
-  more details.
-
-  You should have received a copy of the GNU General Public License along with
-  this program; if not, see <http://www.gnu.org/licenses/>.
-
-  The full GNU General Public License is included in this distribution in
-  the file called "COPYING".
-
-  Contact Information:
-  e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
-  Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
-
-*******************************************************************************/
+/* Intel(R) Gigabit Ethernet Linux driver
+ * Copyright(c) 2007-2014 Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, see <http://www.gnu.org/licenses/>.
+ *
+ * The full GNU General Public License is included in this distribution in
+ * the file called "COPYING".
+ *
+ * Contact Information:
+ * e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
+ * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
+ */
 
 #include <linux/if_ether.h>
 #include <linux/delay.h>
@@ -924,8 +921,7 @@ static s32 igb_copper_link_autoneg(struct e1000_hw *hw)
        if (phy->autoneg_wait_to_complete) {
                ret_val = igb_wait_autoneg(hw);
                if (ret_val) {
-                       hw_dbg("Error while waiting for "
-                              "autoneg to complete\n");
+                       hw_dbg("Error while waiting for autoneg to complete\n");
                        goto out;
                }
        }
@@ -2244,7 +2240,7 @@ void igb_power_down_phy_copper(struct e1000_hw *hw)
                hw->phy.ops.write_reg(hw, GS40G_COPPER_SPEC, power_reg);
        }
        hw->phy.ops.write_reg(hw, PHY_CONTROL, mii_reg);
-       msleep(1);
+       usleep_range(1000, 2000);
 }
 
 /**
index 4c2c36c46a7398d217c1418b3966b4cde5813812..fe921e29dda8f96a201b82d70152466c15a1da26 100644 (file)
@@ -1,28 +1,25 @@
-/*******************************************************************************
-
-  Intel(R) Gigabit Ethernet Linux driver
-  Copyright(c) 2007-2014 Intel Corporation.
-
-  This program is free software; you can redistribute it and/or modify it
-  under the terms and conditions of the GNU General Public License,
-  version 2, as published by the Free Software Foundation.
-
-  This program is distributed in the hope it will be useful, but WITHOUT
-  ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
-  FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
-  more details.
-
-  You should have received a copy of the GNU General Public License along with
-  this program; if not, see <http://www.gnu.org/licenses/>.
-
-  The full GNU General Public License is included in this distribution in
-  the file called "COPYING".
-
-  Contact Information:
-  e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
-  Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
-
-*******************************************************************************/
+/* Intel(R) Gigabit Ethernet Linux driver
+ * Copyright(c) 2007-2014 Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, see <http://www.gnu.org/licenses/>.
+ *
+ * The full GNU General Public License is included in this distribution in
+ * the file called "COPYING".
+ *
+ * Contact Information:
+ * e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
+ * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
+ */
 
 #ifndef _E1000_PHY_H_
 #define _E1000_PHY_H_
index bdb246e848e13bb5e569f279336dbb5a2c5bfe86..833bbb948d970975cd3a42cfa759f7604f8f04a3 100644 (file)
@@ -1,28 +1,25 @@
-/*******************************************************************************
-
-  Intel(R) Gigabit Ethernet Linux driver
-  Copyright(c) 2007-2014 Intel Corporation.
-
-  This program is free software; you can redistribute it and/or modify it
-  under the terms and conditions of the GNU General Public License,
-  version 2, as published by the Free Software Foundation.
-
-  This program is distributed in the hope it will be useful, but WITHOUT
-  ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
-  FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
-  more details.
-
-  You should have received a copy of the GNU General Public License along with
-  this program; if not, see <http://www.gnu.org/licenses/>.
-
-  The full GNU General Public License is included in this distribution in
-  the file called "COPYING".
-
-  Contact Information:
-  e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
-  Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
-
-*******************************************************************************/
+/* Intel(R) Gigabit Ethernet Linux driver
+ * Copyright(c) 2007-2014 Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, see <http://www.gnu.org/licenses/>.
+ *
+ * The full GNU General Public License is included in this distribution in
+ * the file called "COPYING".
+ *
+ * Contact Information:
+ * e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
+ * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
+ */
 
 #ifndef _E1000_REGS_H_
 #define _E1000_REGS_H_
 #define E1000_RA2      0x054E0  /* 2nd half of Rx address array - RW Array */
 #define E1000_PSRTYPE(_i)       (0x05480 + ((_i) * 4))
 #define E1000_RAL(_i)  (((_i) <= 15) ? (0x05400 + ((_i) * 8)) : \
-                                       (0x054E0 + ((_i - 16) * 8)))
+                                       (0x054E0 + ((_i - 16) * 8)))
 #define E1000_RAH(_i)  (((_i) <= 15) ? (0x05404 + ((_i) * 8)) : \
-                                       (0x054E4 + ((_i - 16) * 8)))
+                                       (0x054E4 + ((_i - 16) * 8)))
 #define E1000_IP4AT_REG(_i)     (0x05840 + ((_i) * 8))
 #define E1000_IP6AT_REG(_i)     (0x05880 + ((_i) * 4))
 #define E1000_WUPM_REG(_i)      (0x05A00 + ((_i) * 4))
 #define E1000_VMBMEM(_n)       (0x00800 + (64 * (_n)))
 #define E1000_VMOLR(_n)        (0x05AD0 + (4 * (_n)))
 #define E1000_DVMOLR(_n)       (0x0C038 + (64 * (_n)))
-#define E1000_VLVF(_n)         (0x05D00 + (4 * (_n))) /* VLAN Virtual Machine
-                                                       * Filter - RW */
+#define E1000_VLVF(_n)         (0x05D00 + (4 * (_n))) /* VLAN VM Filter */
 #define E1000_VMVIR(_n)        (0x03700 + (4 * (_n)))
 
 struct e1000_hw;
index 27130065d92a70679292ef316aaadf848772944c..06102d1f7c0362208118ec99dcaba0e6db89eeb3 100644 (file)
@@ -1,29 +1,25 @@
-/*******************************************************************************
-
-  Intel(R) Gigabit Ethernet Linux driver
-  Copyright(c) 2007-2014 Intel Corporation.
-
-  This program is free software; you can redistribute it and/or modify it
-  under the terms and conditions of the GNU General Public License,
-  version 2, as published by the Free Software Foundation.
-
-  This program is distributed in the hope it will be useful, but WITHOUT
-  ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
-  FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
-  more details.
-
-  You should have received a copy of the GNU General Public License along with
-  this program; if not, see <http://www.gnu.org/licenses/>.
-
-  The full GNU General Public License is included in this distribution in
-  the file called "COPYING".
-
-  Contact Information:
-  e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
-  Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
-
-*******************************************************************************/
-
+/* Intel(R) Gigabit Ethernet Linux driver
+ * Copyright(c) 2007-2014 Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, see <http://www.gnu.org/licenses/>.
+ *
+ * The full GNU General Public License is included in this distribution in
+ * the file called "COPYING".
+ *
+ * Contact Information:
+ * e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
+ * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
+ */
 
 /* Linux PRO/1000 Ethernet Driver main header file */
 
@@ -198,6 +194,7 @@ struct igb_tx_buffer {
        unsigned int bytecount;
        u16 gso_segs;
        __be16 protocol;
+
        DEFINE_DMA_UNMAP_ADDR(dma);
        DEFINE_DMA_UNMAP_LEN(len);
        u32 tx_flags;
index e5570acbeea84509855a98383ab128876e7447c9..a84297c85fb1250342fa7b8d3e52a0a64be626a0 100644 (file)
@@ -1,28 +1,25 @@
-/*******************************************************************************
-
-  Intel(R) Gigabit Ethernet Linux driver
-  Copyright(c) 2007-2014 Intel Corporation.
-
-  This program is free software; you can redistribute it and/or modify it
-  under the terms and conditions of the GNU General Public License,
-  version 2, as published by the Free Software Foundation.
-
-  This program is distributed in the hope it will be useful, but WITHOUT
-  ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
-  FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
-  more details.
-
-  You should have received a copy of the GNU General Public License along with
-  this program; if not, see <http://www.gnu.org/licenses/>.
-
-  The full GNU General Public License is included in this distribution in
-  the file called "COPYING".
-
-  Contact Information:
-  e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
-  Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
-
-*******************************************************************************/
+/* Intel(R) Gigabit Ethernet Linux driver
+ * Copyright(c) 2007-2014 Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, see <http://www.gnu.org/licenses/>.
+ *
+ * The full GNU General Public License is included in this distribution in
+ * the file called "COPYING".
+ *
+ * Contact Information:
+ * e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
+ * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
+ */
 
 /* ethtool support for igb */
 
@@ -286,7 +283,7 @@ static int igb_set_settings(struct net_device *netdev, struct ethtool_cmd *ecmd)
        }
 
        while (test_and_set_bit(__IGB_RESETTING, &adapter->state))
-               msleep(1);
+               usleep_range(1000, 2000);
 
        if (ecmd->autoneg == AUTONEG_ENABLE) {
                hw->mac.autoneg = 1;
@@ -399,7 +396,7 @@ static int igb_set_pauseparam(struct net_device *netdev,
        adapter->fc_autoneg = pause->autoneg;
 
        while (test_and_set_bit(__IGB_RESETTING, &adapter->state))
-               msleep(1);
+               usleep_range(1000, 2000);
 
        if (adapter->fc_autoneg == AUTONEG_ENABLE) {
                hw->fc.requested_mode = e1000_fc_default;
@@ -886,7 +883,7 @@ static int igb_set_ringparam(struct net_device *netdev,
        }
 
        while (test_and_set_bit(__IGB_RESETTING, &adapter->state))
-               msleep(1);
+               usleep_range(1000, 2000);
 
        if (!netif_running(adapter->netdev)) {
                for (i = 0; i < adapter->num_tx_queues; i++)
@@ -1060,8 +1057,8 @@ static struct igb_reg_test reg_test_i350[] = {
        { E1000_TDT(0),    0x100, 4,  PATTERN_TEST, 0x0000FFFF, 0x0000FFFF },
        { E1000_TDT(4),    0x40,  4,  PATTERN_TEST, 0x0000FFFF, 0x0000FFFF },
        { E1000_RCTL,      0x100, 1,  SET_READ_TEST, 0xFFFFFFFF, 0x00000000 },
-       { E1000_RCTL,      0x100, 1,  SET_READ_TEST, 0x04CFB0FE, 0x003FFFFB },
-       { E1000_RCTL,      0x100, 1,  SET_READ_TEST, 0x04CFB0FE, 0xFFFFFFFF },
+       { E1000_RCTL,      0x100, 1,  SET_READ_TEST, 0x04CFB0FE, 0x003FFFFB },
+       { E1000_RCTL,      0x100, 1,  SET_READ_TEST, 0x04CFB0FE, 0xFFFFFFFF },
        { E1000_TCTL,      0x100, 1,  SET_READ_TEST, 0xFFFFFFFF, 0x00000000 },
        { E1000_RA,        0, 16, TABLE64_TEST_LO,
                                                0xFFFFFFFF, 0xFFFFFFFF },
@@ -1103,8 +1100,8 @@ static struct igb_reg_test reg_test_82580[] = {
        { E1000_TDT(0),    0x100, 4,  PATTERN_TEST, 0x0000FFFF, 0x0000FFFF },
        { E1000_TDT(4),    0x40,  4,  PATTERN_TEST, 0x0000FFFF, 0x0000FFFF },
        { E1000_RCTL,      0x100, 1,  SET_READ_TEST, 0xFFFFFFFF, 0x00000000 },
-       { E1000_RCTL,      0x100, 1,  SET_READ_TEST, 0x04CFB0FE, 0x003FFFFB },
-       { E1000_RCTL,      0x100, 1,  SET_READ_TEST, 0x04CFB0FE, 0xFFFFFFFF },
+       { E1000_RCTL,      0x100, 1,  SET_READ_TEST, 0x04CFB0FE, 0x003FFFFB },
+       { E1000_RCTL,      0x100, 1,  SET_READ_TEST, 0x04CFB0FE, 0xFFFFFFFF },
        { E1000_TCTL,      0x100, 1,  SET_READ_TEST, 0xFFFFFFFF, 0x00000000 },
        { E1000_RA,        0, 16, TABLE64_TEST_LO,
                                                0xFFFFFFFF, 0xFFFFFFFF },
@@ -1132,8 +1129,10 @@ static struct igb_reg_test reg_test_82576[] = {
        { E1000_RDBAH(4),  0x40, 12, PATTERN_TEST, 0xFFFFFFFF, 0xFFFFFFFF },
        { E1000_RDLEN(4),  0x40, 12, PATTERN_TEST, 0x000FFFF0, 0x000FFFFF },
        /* Enable all RX queues before testing. */
-       { E1000_RXDCTL(0), 0x100, 4,  WRITE_NO_TEST, 0, E1000_RXDCTL_QUEUE_ENABLE },
-       { E1000_RXDCTL(4), 0x40, 12,  WRITE_NO_TEST, 0, E1000_RXDCTL_QUEUE_ENABLE },
+       { E1000_RXDCTL(0), 0x100, 4, WRITE_NO_TEST, 0,
+         E1000_RXDCTL_QUEUE_ENABLE },
+       { E1000_RXDCTL(4), 0x40, 12, WRITE_NO_TEST, 0,
+         E1000_RXDCTL_QUEUE_ENABLE },
        /* RDH is read-only for 82576, only test RDT. */
        { E1000_RDT(0),    0x100, 4,  PATTERN_TEST, 0x0000FFFF, 0x0000FFFF },
        { E1000_RDT(4),    0x40, 12,  PATTERN_TEST, 0x0000FFFF, 0x0000FFFF },
@@ -1149,14 +1148,14 @@ static struct igb_reg_test reg_test_82576[] = {
        { E1000_TDBAH(4),  0x40, 12,  PATTERN_TEST, 0xFFFFFFFF, 0xFFFFFFFF },
        { E1000_TDLEN(4),  0x40, 12,  PATTERN_TEST, 0x000FFFF0, 0x000FFFFF },
        { E1000_RCTL,      0x100, 1,  SET_READ_TEST, 0xFFFFFFFF, 0x00000000 },
-       { E1000_RCTL,      0x100, 1,  SET_READ_TEST, 0x04CFB0FE, 0x003FFFFB },
-       { E1000_RCTL,      0x100, 1,  SET_READ_TEST, 0x04CFB0FE, 0xFFFFFFFF },
+       { E1000_RCTL,      0x100, 1,  SET_READ_TEST, 0x04CFB0FE, 0x003FFFFB },
+       { E1000_RCTL,      0x100, 1,  SET_READ_TEST, 0x04CFB0FE, 0xFFFFFFFF },
        { E1000_TCTL,      0x100, 1,  SET_READ_TEST, 0xFFFFFFFF, 0x00000000 },
        { E1000_RA,        0, 16, TABLE64_TEST_LO, 0xFFFFFFFF, 0xFFFFFFFF },
        { E1000_RA,        0, 16, TABLE64_TEST_HI, 0x83FFFFFF, 0xFFFFFFFF },
        { E1000_RA2,       0, 8, TABLE64_TEST_LO, 0xFFFFFFFF, 0xFFFFFFFF },
        { E1000_RA2,       0, 8, TABLE64_TEST_HI, 0x83FFFFFF, 0xFFFFFFFF },
-       { E1000_MTA,       0, 128,TABLE32_TEST, 0xFFFFFFFF, 0xFFFFFFFF },
+       { E1000_MTA,       0, 128, TABLE32_TEST, 0xFFFFFFFF, 0xFFFFFFFF },
        { 0, 0, 0, 0 }
 };
 
@@ -1170,7 +1169,8 @@ static struct igb_reg_test reg_test_82575[] = {
        { E1000_RDBAH(0),  0x100, 4, PATTERN_TEST, 0xFFFFFFFF, 0xFFFFFFFF },
        { E1000_RDLEN(0),  0x100, 4, PATTERN_TEST, 0x000FFF80, 0x000FFFFF },
        /* Enable all four RX queues before testing. */
-       { E1000_RXDCTL(0), 0x100, 4, WRITE_NO_TEST, 0, E1000_RXDCTL_QUEUE_ENABLE },
+       { E1000_RXDCTL(0), 0x100, 4, WRITE_NO_TEST, 0,
+         E1000_RXDCTL_QUEUE_ENABLE },
        /* RDH is read-only for 82575, only test RDT. */
        { E1000_RDT(0),    0x100, 4, PATTERN_TEST, 0x0000FFFF, 0x0000FFFF },
        { E1000_RXDCTL(0), 0x100, 4, WRITE_NO_TEST, 0, 0 },
@@ -1196,8 +1196,8 @@ static bool reg_pattern_test(struct igb_adapter *adapter, u64 *data,
 {
        struct e1000_hw *hw = &adapter->hw;
        u32 pat, val;
-       static const u32 _test[] =
-               {0x5A5A5A5A, 0xA5A5A5A5, 0x00000000, 0xFFFFFFFF};
+       static const u32 _test[] = {
+               0x5A5A5A5A, 0xA5A5A5A5, 0x00000000, 0xFFFFFFFF};
        for (pat = 0; pat < ARRAY_SIZE(_test); pat++) {
                wr32(reg, (_test[pat] & write));
                val = rd32(reg) & mask;
@@ -1206,11 +1206,11 @@ static bool reg_pattern_test(struct igb_adapter *adapter, u64 *data,
                                "pattern test reg %04X failed: got 0x%08X expected 0x%08X\n",
                                reg, val, (_test[pat] & write & mask));
                        *data = reg;
-                       return 1;
+                       return true;
                }
        }
 
-       return 0;
+       return false;
 }
 
 static bool reg_set_and_check(struct igb_adapter *adapter, u64 *data,
@@ -1218,17 +1218,18 @@ static bool reg_set_and_check(struct igb_adapter *adapter, u64 *data,
 {
        struct e1000_hw *hw = &adapter->hw;
        u32 val;
+
        wr32(reg, write & mask);
        val = rd32(reg);
        if ((write & mask) != (val & mask)) {
                dev_err(&adapter->pdev->dev,
-                       "set/check reg %04X test failed: got 0x%08X expected 0x%08X\n", reg,
-                       (val & mask), (write & mask));
+                       "set/check reg %04X test failed: got 0x%08X expected 0x%08X\n",
+                       reg, (val & mask), (write & mask));
                *data = reg;
-               return 1;
+               return true;
        }
 
-       return 0;
+       return false;
 }
 
 #define REG_PATTERN_TEST(reg, mask, write) \
@@ -1387,14 +1388,14 @@ static int igb_intr_test(struct igb_adapter *adapter, u64 *data)
        /* Hook up test interrupt handler just for this test */
        if (adapter->flags & IGB_FLAG_HAS_MSIX) {
                if (request_irq(adapter->msix_entries[0].vector,
-                               igb_test_intr, 0, netdev->name, adapter)) {
+                               igb_test_intr, 0, netdev->name, adapter)) {
                        *data = 1;
                        return -1;
                }
        } else if (adapter->flags & IGB_FLAG_HAS_MSI) {
                shared_int = false;
                if (request_irq(irq,
-                               igb_test_intr, 0, netdev->name, adapter)) {
+                               igb_test_intr, 0, netdev->name, adapter)) {
                        *data = 1;
                        return -1;
                }
@@ -1412,7 +1413,7 @@ static int igb_intr_test(struct igb_adapter *adapter, u64 *data)
        /* Disable all the interrupts */
        wr32(E1000_IMC, ~0);
        wrfl();
-       msleep(10);
+       usleep_range(10000, 11000);
 
        /* Define all writable bits for ICS */
        switch (hw->mac.type) {
@@ -1459,7 +1460,7 @@ static int igb_intr_test(struct igb_adapter *adapter, u64 *data)
                        wr32(E1000_IMC, mask);
                        wr32(E1000_ICS, mask);
                        wrfl();
-                       msleep(10);
+                       usleep_range(10000, 11000);
 
                        if (adapter->test_icr & mask) {
                                *data = 3;
@@ -1481,7 +1482,7 @@ static int igb_intr_test(struct igb_adapter *adapter, u64 *data)
                wr32(E1000_IMS, mask);
                wr32(E1000_ICS, mask);
                wrfl();
-               msleep(10);
+               usleep_range(10000, 11000);
 
                if (!(adapter->test_icr & mask)) {
                        *data = 4;
@@ -1503,7 +1504,7 @@ static int igb_intr_test(struct igb_adapter *adapter, u64 *data)
                        wr32(E1000_IMC, ~mask);
                        wr32(E1000_ICS, ~mask);
                        wrfl();
-                       msleep(10);
+                       usleep_range(10000, 11000);
 
                        if (adapter->test_icr & mask) {
                                *data = 5;
@@ -1515,7 +1516,7 @@ static int igb_intr_test(struct igb_adapter *adapter, u64 *data)
        /* Disable all the interrupts */
        wr32(E1000_IMC, ~0);
        wrfl();
-       msleep(10);
+       usleep_range(10000, 11000);
 
        /* Unhook test interrupt handler */
        if (adapter->flags & IGB_FLAG_HAS_MSIX)
@@ -1949,6 +1950,7 @@ static int igb_link_test(struct igb_adapter *adapter, u64 *data)
        *data = 0;
        if (hw->phy.media_type == e1000_media_type_internal_serdes) {
                int i = 0;
+
                hw->mac.serdes_has_link = false;
 
                /* On some blade server designs, link establishment
@@ -2413,9 +2415,11 @@ static int igb_get_rss_hash_opts(struct igb_adapter *adapter,
        switch (cmd->flow_type) {
        case TCP_V4_FLOW:
                cmd->data |= RXH_L4_B_0_1 | RXH_L4_B_2_3;
+               /* Fall through */
        case UDP_V4_FLOW:
                if (adapter->flags & IGB_FLAG_RSS_FIELD_IPV4_UDP)
                        cmd->data |= RXH_L4_B_0_1 | RXH_L4_B_2_3;
+               /* Fall through */
        case SCTP_V4_FLOW:
        case AH_ESP_V4_FLOW:
        case AH_V4_FLOW:
@@ -2425,9 +2429,11 @@ static int igb_get_rss_hash_opts(struct igb_adapter *adapter,
                break;
        case TCP_V6_FLOW:
                cmd->data |= RXH_L4_B_0_1 | RXH_L4_B_2_3;
+               /* Fall through */
        case UDP_V6_FLOW:
                if (adapter->flags & IGB_FLAG_RSS_FIELD_IPV6_UDP)
                        cmd->data |= RXH_L4_B_0_1 | RXH_L4_B_2_3;
+               /* Fall through */
        case SCTP_V6_FLOW:
        case AH_ESP_V6_FLOW:
        case AH_V6_FLOW:
@@ -3029,5 +3035,5 @@ static const struct ethtool_ops igb_ethtool_ops = {
 
 void igb_set_ethtool_ops(struct net_device *netdev)
 {
-       SET_ETHTOOL_OPS(netdev, &igb_ethtool_ops);
+       netdev->ethtool_ops = &igb_ethtool_ops;
 }
index 8333f67acf96b3a6e83746c1cedd1f0eff7636ec..44b6a68f1af727136271132014b1efa412ee7e32 100644 (file)
@@ -1,28 +1,25 @@
-/*******************************************************************************
-
-  Intel(R) Gigabit Ethernet Linux driver
-  Copyright(c) 2007-2014 Intel Corporation.
-
-  This program is free software; you can redistribute it and/or modify it
-  under the terms and conditions of the GNU General Public License,
-  version 2, as published by the Free Software Foundation.
-
-  This program is distributed in the hope it will be useful, but WITHOUT
-  ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
-  FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
-  more details.
-
-  You should have received a copy of the GNU General Public License along with
-  this program; if not, see <http://www.gnu.org/licenses/>.
-
-  The full GNU General Public License is included in this distribution in
-  the file called "COPYING".
-
-  Contact Information:
-  e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
-  Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
-
-*******************************************************************************/
+/* Intel(R) Gigabit Ethernet Linux driver
+ * Copyright(c) 2007-2014 Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, see <http://www.gnu.org/licenses/>.
+ *
+ * The full GNU General Public License is included in this distribution in
+ * the file called "COPYING".
+ *
+ * Contact Information:
+ * e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
+ * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
+ */
 
 #include "igb.h"
 #include "e1000_82575.h"
index fb98d4602f9d4fd130b660de0265bee786fec7a6..bfcda8a455f46cc99058faf1a8bd0c339e7300e2 100644 (file)
@@ -1,28 +1,25 @@
-/*******************************************************************************
-
-  Intel(R) Gigabit Ethernet Linux driver
-  Copyright(c) 2007-2014 Intel Corporation.
-
-  This program is free software; you can redistribute it and/or modify it
-  under the terms and conditions of the GNU General Public License,
-  version 2, as published by the Free Software Foundation.
-
-  This program is distributed in the hope it will be useful, but WITHOUT
-  ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
-  FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
-  more details.
-
-  You should have received a copy of the GNU General Public License along with
-  this program; if not, see <http://www.gnu.org/licenses/>.
-
-  The full GNU General Public License is included in this distribution in
-  the file called "COPYING".
-
-  Contact Information:
-  e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
-  Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
-
-*******************************************************************************/
+/* Intel(R) Gigabit Ethernet Linux driver
+ * Copyright(c) 2007-2014 Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, see <http://www.gnu.org/licenses/>.
+ *
+ * The full GNU General Public License is included in this distribution in
+ * the file called "COPYING".
+ *
+ * Contact Information:
+ * e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
+ * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
+ */
 
 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
 
@@ -75,7 +72,7 @@ static const struct e1000_info *igb_info_tbl[] = {
        [board_82575] = &e1000_82575_info,
 };
 
-static DEFINE_PCI_DEVICE_TABLE(igb_pci_tbl) = {
+static const struct pci_device_id igb_pci_tbl[] = {
        { PCI_VDEVICE(INTEL, E1000_DEV_ID_I354_BACKPLANE_1GBPS) },
        { PCI_VDEVICE(INTEL, E1000_DEV_ID_I354_SGMII) },
        { PCI_VDEVICE(INTEL, E1000_DEV_ID_I354_BACKPLANE_2_5GBPS) },
@@ -117,7 +114,6 @@ static DEFINE_PCI_DEVICE_TABLE(igb_pci_tbl) = {
 
 MODULE_DEVICE_TABLE(pci, igb_pci_tbl);
 
-void igb_reset(struct igb_adapter *);
 static int igb_setup_all_tx_resources(struct igb_adapter *);
 static int igb_setup_all_rx_resources(struct igb_adapter *);
 static void igb_free_all_tx_resources(struct igb_adapter *);
@@ -141,7 +137,7 @@ static void igb_watchdog(unsigned long);
 static void igb_watchdog_task(struct work_struct *);
 static netdev_tx_t igb_xmit_frame(struct sk_buff *skb, struct net_device *);
 static struct rtnl_link_stats64 *igb_get_stats64(struct net_device *dev,
-                                                struct rtnl_link_stats64 *stats);
+                                         struct rtnl_link_stats64 *stats);
 static int igb_change_mtu(struct net_device *, int);
 static int igb_set_mac(struct net_device *, void *);
 static void igb_set_uta(struct igb_adapter *adapter);
@@ -159,7 +155,8 @@ static bool igb_clean_rx_irq(struct igb_q_vector *, int);
 static int igb_ioctl(struct net_device *, struct ifreq *, int cmd);
 static void igb_tx_timeout(struct net_device *);
 static void igb_reset_task(struct work_struct *);
-static void igb_vlan_mode(struct net_device *netdev, netdev_features_t features);
+static void igb_vlan_mode(struct net_device *netdev,
+                         netdev_features_t features);
 static int igb_vlan_rx_add_vid(struct net_device *, __be16, u16);
 static int igb_vlan_rx_kill_vid(struct net_device *, __be16, u16);
 static void igb_restore_vlan(struct igb_adapter *);
@@ -215,10 +212,9 @@ static struct notifier_block dca_notifier = {
 static void igb_netpoll(struct net_device *);
 #endif
 #ifdef CONFIG_PCI_IOV
-static unsigned int max_vfs = 0;
+static unsigned int max_vfs;
 module_param(max_vfs, uint, 0);
-MODULE_PARM_DESC(max_vfs, "Maximum number of virtual functions to allocate "
-                 "per physical function");
+MODULE_PARM_DESC(max_vfs, "Maximum number of virtual functions to allocate per physical function");
 #endif /* CONFIG_PCI_IOV */
 
 static pci_ers_result_t igb_io_error_detected(struct pci_dev *,
@@ -384,8 +380,7 @@ static void igb_dump(struct igb_adapter *adapter)
        /* Print netdevice Info */
        if (netdev) {
                dev_info(&adapter->pdev->dev, "Net device Info\n");
-               pr_info("Device Name     state            trans_start      "
-                       "last_rx\n");
+               pr_info("Device Name     state            trans_start      last_rx\n");
                pr_info("%-15s %016lX %016lX %016lX\n", netdev->name,
                        netdev->state, netdev->trans_start, netdev->last_rx);
        }
@@ -438,9 +433,7 @@ static void igb_dump(struct igb_adapter *adapter)
                pr_info("------------------------------------\n");
                pr_info("TX QUEUE INDEX = %d\n", tx_ring->queue_index);
                pr_info("------------------------------------\n");
-               pr_info("T [desc]     [address 63:0  ] [PlPOCIStDDM Ln] "
-                       "[bi->dma       ] leng  ntw timestamp        "
-                       "bi->skb\n");
+               pr_info("T [desc]     [address 63:0  ] [PlPOCIStDDM Ln] [bi->dma       ] leng  ntw timestamp        bi->skb\n");
 
                for (i = 0; tx_ring->desc && (i < tx_ring->count); i++) {
                        const char *next_desc;
@@ -458,9 +451,8 @@ static void igb_dump(struct igb_adapter *adapter)
                        else
                                next_desc = "";
 
-                       pr_info("T [0x%03X]    %016llX %016llX %016llX"
-                               " %04X  %p %016llX %p%s\n", i,
-                               le64_to_cpu(u0->a),
+                       pr_info("T [0x%03X]    %016llX %016llX %016llX %04X  %p %016llX %p%s\n",
+                               i, le64_to_cpu(u0->a),
                                le64_to_cpu(u0->b),
                                (u64)dma_unmap_addr(buffer_info, dma),
                                dma_unmap_len(buffer_info, len),
@@ -519,10 +511,8 @@ rx_ring_summary:
                pr_info("------------------------------------\n");
                pr_info("RX QUEUE INDEX = %d\n", rx_ring->queue_index);
                pr_info("------------------------------------\n");
-               pr_info("R  [desc]      [ PktBuf     A0] [  HeadBuf   DD] "
-                       "[bi->dma       ] [bi->skb] <-- Adv Rx Read format\n");
-               pr_info("RWB[desc]      [PcsmIpSHl PtRs] [vl er S cks ln] -----"
-                       "----------- [bi->skb] <-- Adv Rx Write-Back format\n");
+               pr_info("R  [desc]      [ PktBuf     A0] [  HeadBuf   DD] [bi->dma       ] [bi->skb] <-- Adv Rx Read format\n");
+               pr_info("RWB[desc]      [PcsmIpSHl PtRs] [vl er S cks ln] ---------------- [bi->skb] <-- Adv Rx Write-Back format\n");
 
                for (i = 0; i < rx_ring->count; i++) {
                        const char *next_desc;
@@ -584,7 +574,7 @@ static int igb_get_i2c_data(void *data)
        struct e1000_hw *hw = &adapter->hw;
        s32 i2cctl = rd32(E1000_I2CPARAMS);
 
-       return ((i2cctl & E1000_I2C_DATA_IN) != 0);
+       return !!(i2cctl & E1000_I2C_DATA_IN);
 }
 
 /**
@@ -648,7 +638,7 @@ static int igb_get_i2c_clk(void *data)
        struct e1000_hw *hw = &adapter->hw;
        s32 i2cctl = rd32(E1000_I2CPARAMS);
 
-       return ((i2cctl & E1000_I2C_CLK_IN) != 0);
+       return !!(i2cctl & E1000_I2C_CLK_IN);
 }
 
 static const struct i2c_algo_bit_data igb_i2c_algo = {
@@ -681,9 +671,9 @@ struct net_device *igb_get_hw_dev(struct e1000_hw *hw)
 static int __init igb_init_module(void)
 {
        int ret;
+
        pr_info("%s - version %s\n",
               igb_driver_string, igb_driver_version);
-
        pr_info("%s\n", igb_copyright);
 
 #ifdef CONFIG_IGB_DCA
@@ -736,12 +726,14 @@ static void igb_cache_ring_register(struct igb_adapter *adapter)
                                adapter->rx_ring[i]->reg_idx = rbase_offset +
                                                               Q_IDX_82576(i);
                }
+               /* Fall through */
        case e1000_82575:
        case e1000_82580:
        case e1000_i350:
        case e1000_i354:
        case e1000_i210:
        case e1000_i211:
+               /* Fall through */
        default:
                for (; i < adapter->num_rx_queues; i++)
                        adapter->rx_ring[i]->reg_idx = rbase_offset + i;
@@ -1292,8 +1284,7 @@ static int igb_alloc_q_vector(struct igb_adapter *adapter,
                if (adapter->hw.mac.type >= e1000_82576)
                        set_bit(IGB_RING_FLAG_RX_SCTP_CSUM, &ring->flags);
 
-               /*
-                * On i350, i354, i210, and i211, loopback VLAN packets
+               /* On i350, i354, i210, and i211, loopback VLAN packets
                 * have the tag byte-swapped.
                 */
                if (adapter->hw.mac.type >= e1000_i350)
@@ -1345,6 +1336,7 @@ static int igb_alloc_q_vectors(struct igb_adapter *adapter)
        for (; v_idx < q_vectors; v_idx++) {
                int rqpv = DIV_ROUND_UP(rxr_remaining, q_vectors - v_idx);
                int tqpv = DIV_ROUND_UP(txr_remaining, q_vectors - v_idx);
+
                err = igb_alloc_q_vector(adapter, q_vectors, v_idx,
                                         tqpv, txr_idx, rqpv, rxr_idx);
 
@@ -1484,6 +1476,7 @@ static void igb_irq_disable(struct igb_adapter *adapter)
         */
        if (adapter->flags & IGB_FLAG_HAS_MSIX) {
                u32 regval = rd32(E1000_EIAM);
+
                wr32(E1000_EIAM, regval & ~adapter->eims_enable_mask);
                wr32(E1000_EIMC, adapter->eims_enable_mask);
                regval = rd32(E1000_EIAC);
@@ -1495,6 +1488,7 @@ static void igb_irq_disable(struct igb_adapter *adapter)
        wrfl();
        if (adapter->flags & IGB_FLAG_HAS_MSIX) {
                int i;
+
                for (i = 0; i < adapter->num_q_vectors; i++)
                        synchronize_irq(adapter->msix_entries[i].vector);
        } else {
@@ -1513,6 +1507,7 @@ static void igb_irq_enable(struct igb_adapter *adapter)
        if (adapter->flags & IGB_FLAG_HAS_MSIX) {
                u32 ims = E1000_IMS_LSC | E1000_IMS_DOUTSYNC | E1000_IMS_DRSTA;
                u32 regval = rd32(E1000_EIAC);
+
                wr32(E1000_EIAC, regval | adapter->eims_enable_mask);
                regval = rd32(E1000_EIAM);
                wr32(E1000_EIAM, regval | adapter->eims_enable_mask);
@@ -1745,6 +1740,7 @@ int igb_up(struct igb_adapter *adapter)
        /* notify VFs that reset has been completed */
        if (adapter->vfs_allocated_count) {
                u32 reg_data = rd32(E1000_CTRL_EXT);
+
                reg_data |= E1000_CTRL_EXT_PFRSTD;
                wr32(E1000_CTRL_EXT, reg_data);
        }
@@ -1787,7 +1783,7 @@ void igb_down(struct igb_adapter *adapter)
        wr32(E1000_TCTL, tctl);
        /* flush both disables and wait for them to finish */
        wrfl();
-       msleep(10);
+       usleep_range(10000, 11000);
 
        igb_irq_disable(adapter);
 
@@ -1827,7 +1823,7 @@ void igb_reinit_locked(struct igb_adapter *adapter)
 {
        WARN_ON(in_interrupt());
        while (test_and_set_bit(__IGB_RESETTING, &adapter->state))
-               msleep(1);
+               usleep_range(1000, 2000);
        igb_down(adapter);
        igb_up(adapter);
        clear_bit(__IGB_RESETTING, &adapter->state);
@@ -1960,6 +1956,7 @@ void igb_reset(struct igb_adapter *adapter)
        /* disable receive for all VFs and wait one second */
        if (adapter->vfs_allocated_count) {
                int i;
+
                for (i = 0 ; i < adapter->vfs_allocated_count; i++)
                        adapter->vf_data[i].flags &= IGB_VF_FLAG_PF_SET_MAC;
 
@@ -2529,7 +2526,8 @@ static int igb_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
        }
 
        /* let the f/w know that the h/w is now under the control of the
-        * driver. */
+        * driver.
+        */
        igb_get_hw_control(adapter);
 
        strcpy(netdev->name, "eth%d");
@@ -3077,6 +3075,7 @@ static int __igb_open(struct net_device *netdev, bool resuming)
        /* notify VFs that reset has been completed */
        if (adapter->vfs_allocated_count) {
                u32 reg_data = rd32(E1000_CTRL_EXT);
+
                reg_data |= E1000_CTRL_EXT_PFRSTD;
                wr32(E1000_CTRL_EXT, reg_data);
        }
@@ -3248,7 +3247,7 @@ void igb_setup_tctl(struct igb_adapter *adapter)
  *  Configure a transmit ring after a reset.
  **/
 void igb_configure_tx_ring(struct igb_adapter *adapter,
-                           struct igb_ring *ring)
+                          struct igb_ring *ring)
 {
        struct e1000_hw *hw = &adapter->hw;
        u32 txdctl = 0;
@@ -3389,7 +3388,8 @@ static void igb_setup_mrqc(struct igb_adapter *adapter)
 
        if (adapter->rss_indir_tbl_init != num_rx_queues) {
                for (j = 0; j < IGB_RETA_SIZE; j++)
-                       adapter->rss_indir_tbl[j] = (j * num_rx_queues) / IGB_RETA_SIZE;
+                       adapter->rss_indir_tbl[j] =
+                       (j * num_rx_queues) / IGB_RETA_SIZE;
                adapter->rss_indir_tbl_init = num_rx_queues;
        }
        igb_write_rss_indir_tbl(adapter);
@@ -3430,6 +3430,7 @@ static void igb_setup_mrqc(struct igb_adapter *adapter)
                if (hw->mac.type > e1000_82575) {
                        /* Set the default pool for the PF's first queue */
                        u32 vtctl = rd32(E1000_VT_CTL);
+
                        vtctl &= ~(E1000_VT_CTL_DEFAULT_POOL_MASK |
                                   E1000_VT_CTL_DISABLE_DEF_POOL);
                        vtctl |= adapter->vfs_allocated_count <<
@@ -3511,7 +3512,7 @@ void igb_setup_rctl(struct igb_adapter *adapter)
 }
 
 static inline int igb_set_vf_rlpml(struct igb_adapter *adapter, int size,
-                                   int vfn)
+                                  int vfn)
 {
        struct e1000_hw *hw = &adapter->hw;
        u32 vmolr;
@@ -4058,7 +4059,8 @@ static void igb_check_wvbr(struct igb_adapter *adapter)
        switch (hw->mac.type) {
        case e1000_82576:
        case e1000_i350:
-               if (!(wvbr = rd32(E1000_WVBR)))
+               wvbr = rd32(E1000_WVBR);
+               if (!wvbr)
                        return;
                break;
        default:
@@ -4077,7 +4079,7 @@ static void igb_spoof_check(struct igb_adapter *adapter)
        if (!adapter->wvbr)
                return;
 
-       for(j = 0; j < adapter->vfs_allocated_count; j++) {
+       for (j = 0; j < adapter->vfs_allocated_count; j++) {
                if (adapter->wvbr & (1 << j) ||
                    adapter->wvbr & (1 << (j + IGB_STAGGERED_QUEUE_OFFSET))) {
                        dev_warn(&adapter->pdev->dev,
@@ -4209,14 +4211,15 @@ static void igb_watchdog_task(struct work_struct *work)
 
                if (!netif_carrier_ok(netdev)) {
                        u32 ctrl;
+
                        hw->mac.ops.get_speed_and_duplex(hw,
                                                         &adapter->link_speed,
                                                         &adapter->link_duplex);
 
                        ctrl = rd32(E1000_CTRL);
                        /* Links status message must follow this format */
-                       printk(KERN_INFO "igb: %s NIC Link is Up %d Mbps %s "
-                              "Duplex, Flow Control: %s\n",
+                       netdev_info(netdev,
+                              "igb: %s NIC Link is Up %d Mbps %s Duplex, Flow Control: %s\n",
                               netdev->name,
                               adapter->link_speed,
                               adapter->link_duplex == FULL_DUPLEX ?
@@ -4242,11 +4245,8 @@ static void igb_watchdog_task(struct work_struct *work)
 
                        /* check for thermal sensor event */
                        if (igb_thermal_sensor_event(hw,
-                           E1000_THSTAT_LINK_THROTTLE)) {
-                               netdev_info(netdev, "The network adapter link "
-                                           "speed was downshifted because it "
-                                           "overheated\n");
-                       }
+                           E1000_THSTAT_LINK_THROTTLE))
+                               netdev_info(netdev, "The network adapter link speed was downshifted because it overheated\n");
 
                        /* adjust timeout factor according to speed/duplex */
                        adapter->tx_timeout_factor = 1;
@@ -4277,12 +4277,11 @@ static void igb_watchdog_task(struct work_struct *work)
                        /* check for thermal sensor event */
                        if (igb_thermal_sensor_event(hw,
                            E1000_THSTAT_PWR_DOWN)) {
-                               netdev_err(netdev, "The network adapter was "
-                                          "stopped because it overheated\n");
+                               netdev_err(netdev, "The network adapter was stopped because it overheated\n");
                        }
 
                        /* Links status message must follow this format */
-                       printk(KERN_INFO "igb: %s NIC Link is Down\n",
+                       netdev_info(netdev, "igb: %s NIC Link is Down\n",
                               netdev->name);
                        netif_carrier_off(netdev);
 
@@ -4344,6 +4343,7 @@ static void igb_watchdog_task(struct work_struct *work)
        /* Cause software interrupt to ensure Rx ring is cleaned */
        if (adapter->flags & IGB_FLAG_HAS_MSIX) {
                u32 eics = 0;
+
                for (i = 0; i < adapter->num_q_vectors; i++)
                        eics |= adapter->q_vector[i]->eims_value;
                wr32(E1000_EICS, eics);
@@ -4483,13 +4483,12 @@ static void igb_update_itr(struct igb_q_vector *q_vector,
        case low_latency:  /* 50 usec aka 20000 ints/s */
                if (bytes > 10000) {
                        /* this if handles the TSO accounting */
-                       if (bytes/packets > 8000) {
+                       if (bytes/packets > 8000)
                                itrval = bulk_latency;
-                       } else if ((packets < 10) || ((bytes/packets) > 1200)) {
+                       else if ((packets < 10) || ((bytes/packets) > 1200))
                                itrval = bulk_latency;
-                       } else if ((packets > 35)) {
+                       else if ((packets > 35))
                                itrval = lowest_latency;
-                       }
                } else if (bytes/packets > 2000) {
                        itrval = bulk_latency;
                } else if (packets <= 2 && bytes < 512) {
@@ -4675,6 +4674,7 @@ static void igb_tx_csum(struct igb_ring *tx_ring, struct igb_tx_buffer *first)
                        return;
        } else {
                u8 l4_hdr = 0;
+
                switch (first->protocol) {
                case htons(ETH_P_IP):
                        vlan_macip_lens |= skb_network_header_len(skb);
@@ -4962,6 +4962,7 @@ netdev_tx_t igb_xmit_frame_ring(struct sk_buff *skb,
         */
        if (NETDEV_FRAG_PAGE_MAX_SIZE > IGB_MAX_DATA_PER_TXD) {
                unsigned short f;
+
                for (f = 0; f < skb_shinfo(skb)->nr_frags; f++)
                        count += TXD_USE_COUNT(skb_shinfo(skb)->frags[f].size);
        } else {
@@ -5140,7 +5141,7 @@ static int igb_change_mtu(struct net_device *netdev, int new_mtu)
                max_frame = ETH_FRAME_LEN + ETH_FCS_LEN;
 
        while (test_and_set_bit(__IGB_RESETTING, &adapter->state))
-               msleep(1);
+               usleep_range(1000, 2000);
 
        /* igb_down has a dependency on max_frame_size */
        adapter->max_frame_size = max_frame;
@@ -5193,8 +5194,10 @@ void igb_update_stats(struct igb_adapter *adapter,
 
        rcu_read_lock();
        for (i = 0; i < adapter->num_rx_queues; i++) {
-               u32 rqdpc = rd32(E1000_RQDPC(i));
                struct igb_ring *ring = adapter->rx_ring[i];
+               u32 rqdpc = rd32(E1000_RQDPC(i));
+               if (hw->mac.type >= e1000_i210)
+                       wr32(E1000_RQDPC(i), 0);
 
                if (rqdpc) {
                        ring->rx_stats.drops += rqdpc;
@@ -5619,6 +5622,7 @@ static int igb_set_vf_promisc(struct igb_adapter *adapter, u32 *msgbuf, u32 vf)
                        vmolr |= E1000_VMOLR_MPME;
                } else if (vf_data->num_vf_mc_hashes) {
                        int j;
+
                        vmolr |= E1000_VMOLR_ROMPE;
                        for (j = 0; j < vf_data->num_vf_mc_hashes; j++)
                                igb_mta_set(hw, vf_data->vf_mc_hashes[j]);
@@ -5670,6 +5674,7 @@ static void igb_restore_vf_multicasts(struct igb_adapter *adapter)
 
        for (i = 0; i < adapter->vfs_allocated_count; i++) {
                u32 vmolr = rd32(E1000_VMOLR(i));
+
                vmolr &= ~(E1000_VMOLR_ROMPE | E1000_VMOLR_MPME);
 
                vf_data = &adapter->vf_data[i];
@@ -5768,6 +5773,7 @@ static s32 igb_vlvf_set(struct igb_adapter *adapter, u32 vid, bool add, u32 vf)
 
                        if (!adapter->vf_data[vf].vlans_enabled) {
                                u32 size;
+
                                reg = rd32(E1000_VMOLR(vf));
                                size = reg & E1000_VMOLR_RLPML_MASK;
                                size += 4;
@@ -5796,6 +5802,7 @@ static s32 igb_vlvf_set(struct igb_adapter *adapter, u32 vid, bool add, u32 vf)
                        adapter->vf_data[vf].vlans_enabled--;
                        if (!adapter->vf_data[vf].vlans_enabled) {
                                u32 size;
+
                                reg = rd32(E1000_VMOLR(vf));
                                size = reg & E1000_VMOLR_RLPML_MASK;
                                size -= 4;
@@ -5900,8 +5907,8 @@ static int igb_set_vf_vlan(struct igb_adapter *adapter, u32 *msgbuf, u32 vf)
         */
        if (!add && (adapter->netdev->flags & IFF_PROMISC)) {
                u32 vlvf, bits;
-
                int regndx = igb_find_vlvf_entry(adapter, vid);
+
                if (regndx < 0)
                        goto out;
                /* See if any other pools are set for this VLAN filter
@@ -6492,7 +6499,7 @@ static void igb_reuse_rx_page(struct igb_ring *rx_ring,
        rx_ring->next_to_alloc = (nta < rx_ring->count) ? nta : 0;
 
        /* transfer page from old buffer to new buffer */
-       memcpy(new_buff, old_buff, sizeof(struct igb_rx_buffer));
+       *new_buff = *old_buff;
 
        /* sync the buffer for use by the device */
        dma_sync_single_range_for_device(rx_ring->dev, old_buff->dma,
@@ -6961,6 +6968,7 @@ static void igb_process_skb_fields(struct igb_ring *rx_ring,
        if ((dev->features & NETIF_F_HW_VLAN_CTAG_RX) &&
            igb_test_staterr(rx_desc, E1000_RXD_STAT_VP)) {
                u16 vid;
+
                if (igb_test_staterr(rx_desc, E1000_RXDEXT_STATERR_LB) &&
                    test_bit(IGB_RING_FLAG_RX_LB_VLAN_BSWAP, &rx_ring->flags))
                        vid = be16_to_cpu(rx_desc->wb.upper.vlan);
@@ -7049,7 +7057,7 @@ static bool igb_clean_rx_irq(struct igb_q_vector *q_vector, const int budget)
        if (cleaned_count)
                igb_alloc_rx_buffers(rx_ring, cleaned_count);
 
-       return (total_packets < budget);
+       return total_packets < budget;
 }
 
 static bool igb_alloc_mapped_page(struct igb_ring *rx_ring,
@@ -7170,7 +7178,7 @@ static int igb_mii_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd)
                break;
        case SIOCGMIIREG:
                if (igb_read_phy_reg(&adapter->hw, data->reg_num & 0x1F,
-                                    &data->val_out))
+                                    &data->val_out))
                        return -EIO;
                break;
        case SIOCSMIIREG:
@@ -7953,11 +7961,13 @@ static void igb_vmm_control(struct igb_adapter *adapter)
                reg = rd32(E1000_DTXCTL);
                reg |= E1000_DTXCTL_VLAN_ADDED;
                wr32(E1000_DTXCTL, reg);
+               /* Fall through */
        case e1000_82580:
                /* enable replication vlan tag stripping */
                reg = rd32(E1000_RPLOLR);
                reg |= E1000_RPLOLR_STRVLAN;
                wr32(E1000_RPLOLR, reg);
+               /* Fall through */
        case e1000_i350:
                /* none of the above registers are supported by i350 */
                break;
@@ -8047,6 +8057,7 @@ static void igb_init_dmac(struct igb_adapter *adapter, u32 pba)
                } /* endif adapter->dmac is not disabled */
        } else if (hw->mac.type == e1000_82580) {
                u32 reg = rd32(E1000_PCIEMISC);
+
                wr32(E1000_PCIEMISC, reg & ~E1000_PCIEMISC_LX_DECISION);
                wr32(E1000_DMACR, 0);
        }
index 9209d652e1c96090c712d78ce925612871ebad64..ab25e49365f79e26cfcad7a7ad12b8c5192824c9 100644 (file)
@@ -389,7 +389,7 @@ static void igb_ptp_tx_work(struct work_struct *work)
                adapter->ptp_tx_skb = NULL;
                clear_bit_unlock(__IGB_PTP_TX_IN_PROGRESS, &adapter->state);
                adapter->tx_hwtstamp_timeouts++;
-               dev_warn(&adapter->pdev->dev, "clearing Tx timestamp hang");
+               dev_warn(&adapter->pdev->dev, "clearing Tx timestamp hang\n");
                return;
        }
 
@@ -451,7 +451,7 @@ void igb_ptp_rx_hang(struct igb_adapter *adapter)
                rd32(E1000_RXSTMPH);
                adapter->last_rx_ptp_check = jiffies;
                adapter->rx_hwtstamp_cleared++;
-               dev_warn(&adapter->pdev->dev, "clearing Rx timestamp hang");
+               dev_warn(&adapter->pdev->dev, "clearing Rx timestamp hang\n");
        }
 }
 
index 90eef07943f4d50bbd027646e170abca5abfb1a3..f58170bae18b8323c2b7c12424707907ff4bc70d 100644 (file)
@@ -476,5 +476,5 @@ static const struct ethtool_ops igbvf_ethtool_ops = {
 
 void igbvf_set_ethtool_ops(struct net_device *netdev)
 {
-       SET_ETHTOOL_OPS(netdev, &igbvf_ethtool_ops);
+       netdev->ethtool_ops = &igbvf_ethtool_ops;
 }
index dbb7dd2f8e360e4d6c1013182e9d667a19d7fa56..1da2d987d370b12c0dbb6b81490d83e958b4ba36 100644 (file)
@@ -656,5 +656,5 @@ static const struct ethtool_ops ixgb_ethtool_ops = {
 
 void ixgb_set_ethtool_ops(struct net_device *netdev)
 {
-       SET_ETHTOOL_OPS(netdev, &ixgb_ethtool_ops);
+       netdev->ethtool_ops = &ixgb_ethtool_ops;
 }
index 1a12c1dd7a279c8f9db97c61a61ff4847c74db97..c688c8a4c0630ea5555214c6635bcc76940d6d57 100644 (file)
@@ -155,7 +155,6 @@ struct vf_data_storage {
 struct vf_macvlans {
        struct list_head l;
        int vf;
-       int rar_entry;
        bool free;
        bool is_macvlan;
        u8 vf_macvlan[ETH_ALEN];
@@ -256,7 +255,6 @@ struct ixgbe_ring {
                struct ixgbe_tx_buffer *tx_buffer_info;
                struct ixgbe_rx_buffer *rx_buffer_info;
        };
-       unsigned long last_rx_timestamp;
        unsigned long state;
        u8 __iomem *tail;
        dma_addr_t dma;                 /* phys. address of descriptor ring */
@@ -614,6 +612,15 @@ static inline void ixgbe_write_tail(struct ixgbe_ring *ring, u32 value)
 #define MAX_MSIX_VECTORS_82598 18
 #define MAX_Q_VECTORS_82598 16
 
+struct ixgbe_mac_addr {
+       u8 addr[ETH_ALEN];
+       u16 queue;
+       u16 state; /* bitmask */
+};
+#define IXGBE_MAC_STATE_DEFAULT                0x1
+#define IXGBE_MAC_STATE_MODIFIED       0x2
+#define IXGBE_MAC_STATE_IN_USE         0x4
+
 #define MAX_Q_VECTORS MAX_Q_VECTORS_82599
 #define MAX_MSIX_COUNT MAX_MSIX_VECTORS_82599
 
@@ -770,6 +777,7 @@ struct ixgbe_adapter {
        unsigned long ptp_tx_start;
        unsigned long last_overflow_check;
        unsigned long last_rx_ptp_check;
+       unsigned long last_rx_timestamp;
        spinlock_t tmreg_lock;
        struct cyclecounter cc;
        struct timecounter tc;
@@ -785,6 +793,7 @@ struct ixgbe_adapter {
 
        u32 timer_event_accumulator;
        u32 vferr_refcount;
+       struct ixgbe_mac_addr *mac_table;
        struct kobject *info_kobj;
 #ifdef CONFIG_IXGBE_HWMON
        struct hwmon_buff *ixgbe_hwmon_buff;
@@ -863,6 +872,13 @@ void ixgbe_update_stats(struct ixgbe_adapter *adapter);
 int ixgbe_init_interrupt_scheme(struct ixgbe_adapter *adapter);
 int ixgbe_wol_supported(struct ixgbe_adapter *adapter, u16 device_id,
                               u16 subdevice_id);
+#ifdef CONFIG_PCI_IOV
+void ixgbe_full_sync_mac_table(struct ixgbe_adapter *adapter);
+#endif
+int ixgbe_add_mac_filter(struct ixgbe_adapter *adapter,
+                        u8 *addr, u16 queue);
+int ixgbe_del_mac_filter(struct ixgbe_adapter *adapter,
+                        u8 *addr, u16 queue);
 void ixgbe_clear_interrupt_scheme(struct ixgbe_adapter *adapter);
 netdev_tx_t ixgbe_xmit_frame_ring(struct sk_buff *, struct ixgbe_adapter *,
                                  struct ixgbe_ring *);
@@ -944,24 +960,7 @@ void ixgbe_ptp_init(struct ixgbe_adapter *adapter);
 void ixgbe_ptp_stop(struct ixgbe_adapter *adapter);
 void ixgbe_ptp_overflow_check(struct ixgbe_adapter *adapter);
 void ixgbe_ptp_rx_hang(struct ixgbe_adapter *adapter);
-void __ixgbe_ptp_rx_hwtstamp(struct ixgbe_q_vector *q_vector,
-                            struct sk_buff *skb);
-static inline void ixgbe_ptp_rx_hwtstamp(struct ixgbe_ring *rx_ring,
-                                        union ixgbe_adv_rx_desc *rx_desc,
-                                        struct sk_buff *skb)
-{
-       if (unlikely(!ixgbe_test_staterr(rx_desc, IXGBE_RXDADV_STAT_TS)))
-               return;
-
-       __ixgbe_ptp_rx_hwtstamp(rx_ring->q_vector, skb);
-
-       /*
-        * Update the last_rx_timestamp timer in order to enable watchdog check
-        * for error case of latched timestamp on a dropped packet.
-        */
-       rx_ring->last_rx_timestamp = jiffies;
-}
-
+void ixgbe_ptp_rx_hwtstamp(struct ixgbe_adapter *adapter, struct sk_buff *skb);
 int ixgbe_ptp_set_ts_config(struct ixgbe_adapter *adapter, struct ifreq *ifr);
 int ixgbe_ptp_get_ts_config(struct ixgbe_adapter *adapter, struct ifreq *ifr);
 void ixgbe_ptp_start_cyclecounter(struct ixgbe_adapter *adapter);
index 4c78ea8946c1b48838db6d7e9ce890089ff6ab59..1c52e4753480c0a133451e9234ffc826772b2d41 100644 (file)
@@ -337,19 +337,25 @@ static s32 ixgbe_fc_enable_82598(struct ixgbe_hw *hw)
        int i;
        bool link_up;
 
-       /*
-        * Validate the water mark configuration for packet buffer 0.  Zero
-        * water marks indicate that the packet buffer was not configured
-        * and the watermarks for packet buffer 0 should always be configured.
-        */
-       if (!hw->fc.low_water ||
-           !hw->fc.high_water[0] ||
-           !hw->fc.pause_time) {
-               hw_dbg(hw, "Invalid water mark configuration\n");
+       /* Validate the water mark configuration */
+       if (!hw->fc.pause_time) {
                ret_val = IXGBE_ERR_INVALID_LINK_SETTINGS;
                goto out;
        }
 
+       /* Low water mark of zero causes XOFF floods */
+       for (i = 0; i < MAX_TRAFFIC_CLASS; i++) {
+               if ((hw->fc.current_mode & ixgbe_fc_tx_pause) &&
+                   hw->fc.high_water[i]) {
+                       if (!hw->fc.low_water[i] ||
+                           hw->fc.low_water[i] >= hw->fc.high_water[i]) {
+                               hw_dbg(hw, "Invalid water mark configuration\n");
+                               ret_val = IXGBE_ERR_INVALID_LINK_SETTINGS;
+                               goto out;
+                       }
+               }
+       }
+
        /*
         * On 82598 having Rx FC on causes resets while doing 1G
         * so if it's on turn it off once we know link_speed. For
@@ -432,12 +438,11 @@ static s32 ixgbe_fc_enable_82598(struct ixgbe_hw *hw)
        IXGBE_WRITE_REG(hw, IXGBE_FCTRL, fctrl_reg);
        IXGBE_WRITE_REG(hw, IXGBE_RMCS, rmcs_reg);
 
-       fcrtl = (hw->fc.low_water << 10) | IXGBE_FCRTL_XONE;
-
        /* Set up and enable Rx high/low water mark thresholds, enable XON. */
        for (i = 0; i < MAX_TRAFFIC_CLASS; i++) {
                if ((hw->fc.current_mode & ixgbe_fc_tx_pause) &&
                    hw->fc.high_water[i]) {
+                       fcrtl = (hw->fc.low_water[i] << 10) | IXGBE_FCRTL_XONE;
                        fcrth = (hw->fc.high_water[i] << 10) | IXGBE_FCRTH_FCEN;
                        IXGBE_WRITE_REG(hw, IXGBE_FCRTL(i), fcrtl);
                        IXGBE_WRITE_REG(hw, IXGBE_FCRTH(i), fcrth);
index 24fba39e194e682640391e42e5e618907ff230d9..bdc55819179d19e7be2994c692b51941c464943a 100644 (file)
@@ -271,6 +271,7 @@ out:
  **/
 s32 ixgbe_start_hw_generic(struct ixgbe_hw *hw)
 {
+       s32 ret_val;
        u32 ctrl_ext;
 
        /* Set the media type */
@@ -292,12 +293,15 @@ s32 ixgbe_start_hw_generic(struct ixgbe_hw *hw)
        IXGBE_WRITE_FLUSH(hw);
 
        /* Setup flow control */
-       ixgbe_setup_fc(hw);
+       ret_val = ixgbe_setup_fc(hw);
+       if (!ret_val)
+               goto out;
 
        /* Clear adapter stopped flag */
        hw->adapter_stopped = false;
 
-       return 0;
+out:
+       return ret_val;
 }
 
 /**
@@ -1195,7 +1199,7 @@ static s32 ixgbe_detect_eeprom_page_size_generic(struct ixgbe_hw *hw,
         */
        hw->eeprom.word_page_size = IXGBE_EEPROM_PAGE_SIZE_MAX - data[0];
 
-       hw_dbg(hw, "Detected EEPROM page size = %d words.",
+       hw_dbg(hw, "Detected EEPROM page size = %d words.\n",
               hw->eeprom.word_page_size);
 out:
        return status;
@@ -2106,19 +2110,25 @@ s32 ixgbe_fc_enable_generic(struct ixgbe_hw *hw)
        u32 fcrtl, fcrth;
        int i;
 
-       /*
-        * Validate the water mark configuration for packet buffer 0.  Zero
-        * water marks indicate that the packet buffer was not configured
-        * and the watermarks for packet buffer 0 should always be configured.
-        */
-       if (!hw->fc.low_water ||
-           !hw->fc.high_water[0] ||
-           !hw->fc.pause_time) {
-               hw_dbg(hw, "Invalid water mark configuration\n");
+       /* Validate the water mark configuration. */
+       if (!hw->fc.pause_time) {
                ret_val = IXGBE_ERR_INVALID_LINK_SETTINGS;
                goto out;
        }
 
+       /* Low water mark of zero causes XOFF floods */
+       for (i = 0; i < MAX_TRAFFIC_CLASS; i++) {
+               if ((hw->fc.current_mode & ixgbe_fc_tx_pause) &&
+                   hw->fc.high_water[i]) {
+                       if (!hw->fc.low_water[i] ||
+                           hw->fc.low_water[i] >= hw->fc.high_water[i]) {
+                               hw_dbg(hw, "Invalid water mark configuration\n");
+                               ret_val = IXGBE_ERR_INVALID_LINK_SETTINGS;
+                               goto out;
+                       }
+               }
+       }
+
        /* Negotiate the fc mode to use */
        ixgbe_fc_autoneg(hw);
 
@@ -2181,12 +2191,11 @@ s32 ixgbe_fc_enable_generic(struct ixgbe_hw *hw)
        IXGBE_WRITE_REG(hw, IXGBE_MFLCN, mflcn_reg);
        IXGBE_WRITE_REG(hw, IXGBE_FCCFG, fccfg_reg);
 
-       fcrtl = (hw->fc.low_water << 10) | IXGBE_FCRTL_XONE;
-
        /* Set up and enable Rx high/low water mark thresholds, enable XON. */
        for (i = 0; i < MAX_TRAFFIC_CLASS; i++) {
                if ((hw->fc.current_mode & ixgbe_fc_tx_pause) &&
                    hw->fc.high_water[i]) {
+                       fcrtl = (hw->fc.low_water[i] << 10) | IXGBE_FCRTL_XONE;
                        IXGBE_WRITE_REG(hw, IXGBE_FCRTL_82599(i), fcrtl);
                        fcrth = (hw->fc.high_water[i] << 10) | IXGBE_FCRTH_FCEN;
                } else {
index f12c40fb5537a18604ff030f4adc4287946dbff4..d15ff2e5edb76b3dcb80a102666b2e2fa1dc6531 100644 (file)
@@ -141,8 +141,6 @@ static inline bool ixgbe_removed(void __iomem *addr)
        return unlikely(!addr);
 }
 
-void ixgbe_check_remove(struct ixgbe_hw *hw, u32 reg);
-
 static inline void ixgbe_write_reg(struct ixgbe_hw *hw, u32 reg, u32 value)
 {
        u8 __iomem *reg_addr = ACCESS_ONCE(hw->hw_addr);
@@ -172,18 +170,7 @@ static inline void ixgbe_write_reg64(struct ixgbe_hw *hw, u32 reg, u64 value)
 }
 #define IXGBE_WRITE_REG64(a, reg, value) ixgbe_write_reg64((a), (reg), (value))
 
-static inline u32 ixgbe_read_reg(struct ixgbe_hw *hw, u32 reg)
-{
-       u8 __iomem *reg_addr = ACCESS_ONCE(hw->hw_addr);
-       u32 value;
-
-       if (ixgbe_removed(reg_addr))
-               return IXGBE_FAILED_READ_REG;
-       value = readl(reg_addr + reg);
-       if (unlikely(value == IXGBE_FAILED_READ_REG))
-               ixgbe_check_remove(hw, reg);
-       return value;
-}
+u32 ixgbe_read_reg(struct ixgbe_hw *hw, u32 reg);
 #define IXGBE_READ_REG(a, reg) ixgbe_read_reg((a), (reg))
 
 #define IXGBE_WRITE_REG_ARRAY(a, reg, offset, value) \
index 7a77f37a7cbcbd6b7b5b87dd78e9e50b677e778c..d3ba63f9ad3712fcf82d1bafc3408070112d1aa0 100644 (file)
@@ -208,7 +208,6 @@ s32 ixgbe_dcb_config_pfc_82598(struct ixgbe_hw *hw, u8 pfc_en)
 
        IXGBE_WRITE_REG(hw, IXGBE_FCTRL, reg);
 
-       fcrtl = (hw->fc.low_water << 10) | IXGBE_FCRTL_XONE;
        /* Configure PFC Tx thresholds per TC */
        for (i = 0; i < MAX_TRAFFIC_CLASS; i++) {
                if (!(pfc_en & (1 << i))) {
@@ -217,6 +216,7 @@ s32 ixgbe_dcb_config_pfc_82598(struct ixgbe_hw *hw, u8 pfc_en)
                        continue;
                }
 
+               fcrtl = (hw->fc.low_water[i] << 10) | IXGBE_FCRTL_XONE;
                reg = (hw->fc.high_water[i] << 10) | IXGBE_FCRTH_FCEN;
                IXGBE_WRITE_REG(hw, IXGBE_FCRTL(i), fcrtl);
                IXGBE_WRITE_REG(hw, IXGBE_FCRTH(i), reg);
index bdb99b3b0f30f4ee2253e81bc72e84fb6a0d1e30..3b932fe64ab66c916f86f4184f45d626cc687cb1 100644 (file)
@@ -242,7 +242,6 @@ s32 ixgbe_dcb_config_pfc_82599(struct ixgbe_hw *hw, u8 pfc_en, u8 *prio_tc)
                        max_tc = prio_tc[i];
        }
 
-       fcrtl = (hw->fc.low_water << 10) | IXGBE_FCRTL_XONE;
 
        /* Configure PFC Tx thresholds per TC */
        for (i = 0; i <= max_tc; i++) {
@@ -257,6 +256,7 @@ s32 ixgbe_dcb_config_pfc_82599(struct ixgbe_hw *hw, u8 pfc_en, u8 *prio_tc)
 
                if (enabled) {
                        reg = (hw->fc.high_water[i] << 10) | IXGBE_FCRTH_FCEN;
+                       fcrtl = (hw->fc.low_water[i] << 10) | IXGBE_FCRTL_XONE;
                        IXGBE_WRITE_REG(hw, IXGBE_FCRTL_82599(i), fcrtl);
                } else {
                        reg = IXGBE_READ_REG(hw, IXGBE_RXPBSIZE(i)) - 32;
index 6c55c14d082aa6285f43941e5d96ae86acdbb21b..31d7268401e71aeacc526fb190b962b296472a1a 100644 (file)
@@ -3099,5 +3099,5 @@ static const struct ethtool_ops ixgbe_ethtool_ops = {
 
 void ixgbe_set_ethtool_ops(struct net_device *netdev)
 {
-       SET_ETHTOOL_OPS(netdev, &ixgbe_ethtool_ops);
+       netdev->ethtool_ops = &ixgbe_ethtool_ops;
 }
index b16cc786750dec8bb7bf29749db331a31f028ec9..0772b7730fce92de4e2ff54d44f6528397c2b3a9 100644 (file)
@@ -81,9 +81,7 @@ struct ixgbe_fcoe {
        void *extra_ddp_buffer;
        dma_addr_t extra_ddp_buffer_dma;
        unsigned long mode;
-#ifdef CONFIG_IXGBE_DCB
        u8 up;
-#endif
 };
 
 #endif /* _IXGBE_FCOE_H */
index c4c526b7f99f48e2fe4eaccfe070273bd4078c98..8089ea9f2fba2b40662f3120a18ffbc84db33e5e 100644 (file)
@@ -301,7 +301,7 @@ static void ixgbe_remove_adapter(struct ixgbe_hw *hw)
                ixgbe_service_event_schedule(adapter);
 }
 
-void ixgbe_check_remove(struct ixgbe_hw *hw, u32 reg)
+static void ixgbe_check_remove(struct ixgbe_hw *hw, u32 reg)
 {
        u32 value;
 
@@ -320,6 +320,32 @@ void ixgbe_check_remove(struct ixgbe_hw *hw, u32 reg)
                ixgbe_remove_adapter(hw);
 }
 
+/**
+ * ixgbe_read_reg - Read from device register
+ * @hw: hw specific details
+ * @reg: offset of register to read
+ *
+ * Returns : value read or IXGBE_FAILED_READ_REG if removed
+ *
+ * This function is used to read device registers. It checks for device
+ * removal by confirming any read that returns all ones by checking the
+ * status register value for all ones. This function avoids reading from
+ * the hardware if a removal was previously detected in which case it
+ * returns IXGBE_FAILED_READ_REG (all ones).
+ */
+u32 ixgbe_read_reg(struct ixgbe_hw *hw, u32 reg)
+{
+       u8 __iomem *reg_addr = ACCESS_ONCE(hw->hw_addr);
+       u32 value;
+
+       if (ixgbe_removed(reg_addr))
+               return IXGBE_FAILED_READ_REG;
+       value = readl(reg_addr + reg);
+       if (unlikely(value == IXGBE_FAILED_READ_REG))
+               ixgbe_check_remove(hw, reg);
+       return value;
+}
+
 static bool ixgbe_check_cfg_remove(struct ixgbe_hw *hw, struct pci_dev *pdev)
 {
        u16 value;
@@ -1664,7 +1690,8 @@ static void ixgbe_process_skb_fields(struct ixgbe_ring *rx_ring,
 
        ixgbe_rx_checksum(rx_ring, rx_desc, skb);
 
-       ixgbe_ptp_rx_hwtstamp(rx_ring, rx_desc, skb);
+       if (unlikely(ixgbe_test_staterr(rx_desc, IXGBE_RXDADV_STAT_TS)))
+               ixgbe_ptp_rx_hwtstamp(rx_ring->q_vector->adapter, skb);
 
        if ((dev->features & NETIF_F_HW_VLAN_CTAG_RX) &&
            ixgbe_test_staterr(rx_desc, IXGBE_RXD_STAT_VP)) {
@@ -3741,35 +3768,6 @@ static int ixgbe_vlan_rx_kill_vid(struct net_device *netdev,
        return 0;
 }
 
-/**
- * ixgbe_vlan_filter_disable - helper to disable hw vlan filtering
- * @adapter: driver data
- */
-static void ixgbe_vlan_filter_disable(struct ixgbe_adapter *adapter)
-{
-       struct ixgbe_hw *hw = &adapter->hw;
-       u32 vlnctrl;
-
-       vlnctrl = IXGBE_READ_REG(hw, IXGBE_VLNCTRL);
-       vlnctrl &= ~(IXGBE_VLNCTRL_VFE | IXGBE_VLNCTRL_CFIEN);
-       IXGBE_WRITE_REG(hw, IXGBE_VLNCTRL, vlnctrl);
-}
-
-/**
- * ixgbe_vlan_filter_enable - helper to enable hw vlan filtering
- * @adapter: driver data
- */
-static void ixgbe_vlan_filter_enable(struct ixgbe_adapter *adapter)
-{
-       struct ixgbe_hw *hw = &adapter->hw;
-       u32 vlnctrl;
-
-       vlnctrl = IXGBE_READ_REG(hw, IXGBE_VLNCTRL);
-       vlnctrl |= IXGBE_VLNCTRL_VFE;
-       vlnctrl &= ~IXGBE_VLNCTRL_CFIEN;
-       IXGBE_WRITE_REG(hw, IXGBE_VLNCTRL, vlnctrl);
-}
-
 /**
  * ixgbe_vlan_strip_disable - helper to disable hw vlan stripping
  * @adapter: driver data
@@ -3848,6 +3846,158 @@ static void ixgbe_restore_vlan(struct ixgbe_adapter *adapter)
                ixgbe_vlan_rx_add_vid(adapter->netdev, htons(ETH_P_8021Q), vid);
 }
 
+/**
+ * ixgbe_write_mc_addr_list - write multicast addresses to MTA
+ * @netdev: network interface device structure
+ *
+ * Writes multicast address list to the MTA hash table.
+ * Returns: -ENOMEM on failure
+ *                0 on no addresses written
+ *                X on writing X addresses to MTA
+ **/
+static int ixgbe_write_mc_addr_list(struct net_device *netdev)
+{
+       struct ixgbe_adapter *adapter = netdev_priv(netdev);
+       struct ixgbe_hw *hw = &adapter->hw;
+
+       if (!netif_running(netdev))
+               return 0;
+
+       if (hw->mac.ops.update_mc_addr_list)
+               hw->mac.ops.update_mc_addr_list(hw, netdev);
+       else
+               return -ENOMEM;
+
+#ifdef CONFIG_PCI_IOV
+       ixgbe_restore_vf_multicasts(adapter);
+#endif
+
+       return netdev_mc_count(netdev);
+}
+
+#ifdef CONFIG_PCI_IOV
+void ixgbe_full_sync_mac_table(struct ixgbe_adapter *adapter)
+{
+       struct ixgbe_hw *hw = &adapter->hw;
+       int i;
+       for (i = 0; i < hw->mac.num_rar_entries; i++) {
+               if (adapter->mac_table[i].state & IXGBE_MAC_STATE_IN_USE)
+                       hw->mac.ops.set_rar(hw, i, adapter->mac_table[i].addr,
+                                           adapter->mac_table[i].queue,
+                                           IXGBE_RAH_AV);
+               else
+                       hw->mac.ops.clear_rar(hw, i);
+
+               adapter->mac_table[i].state &= ~(IXGBE_MAC_STATE_MODIFIED);
+       }
+}
+#endif
+
+static void ixgbe_sync_mac_table(struct ixgbe_adapter *adapter)
+{
+       struct ixgbe_hw *hw = &adapter->hw;
+       int i;
+       for (i = 0; i < hw->mac.num_rar_entries; i++) {
+               if (adapter->mac_table[i].state & IXGBE_MAC_STATE_MODIFIED) {
+                       if (adapter->mac_table[i].state &
+                           IXGBE_MAC_STATE_IN_USE)
+                               hw->mac.ops.set_rar(hw, i,
+                                               adapter->mac_table[i].addr,
+                                               adapter->mac_table[i].queue,
+                                               IXGBE_RAH_AV);
+                       else
+                               hw->mac.ops.clear_rar(hw, i);
+
+                       adapter->mac_table[i].state &=
+                                               ~(IXGBE_MAC_STATE_MODIFIED);
+               }
+       }
+}
+
+static void ixgbe_flush_sw_mac_table(struct ixgbe_adapter *adapter)
+{
+       int i;
+       struct ixgbe_hw *hw = &adapter->hw;
+
+       for (i = 0; i < hw->mac.num_rar_entries; i++) {
+               adapter->mac_table[i].state |= IXGBE_MAC_STATE_MODIFIED;
+               adapter->mac_table[i].state &= ~IXGBE_MAC_STATE_IN_USE;
+               memset(adapter->mac_table[i].addr, 0, ETH_ALEN);
+               adapter->mac_table[i].queue = 0;
+       }
+       ixgbe_sync_mac_table(adapter);
+}
+
+static int ixgbe_available_rars(struct ixgbe_adapter *adapter)
+{
+       struct ixgbe_hw *hw = &adapter->hw;
+       int i, count = 0;
+
+       for (i = 0; i < hw->mac.num_rar_entries; i++) {
+               if (adapter->mac_table[i].state == 0)
+                       count++;
+       }
+       return count;
+}
+
+/* this function destroys the first RAR entry */
+static void ixgbe_mac_set_default_filter(struct ixgbe_adapter *adapter,
+                                        u8 *addr)
+{
+       struct ixgbe_hw *hw = &adapter->hw;
+
+       memcpy(&adapter->mac_table[0].addr, addr, ETH_ALEN);
+       adapter->mac_table[0].queue = VMDQ_P(0);
+       adapter->mac_table[0].state = (IXGBE_MAC_STATE_DEFAULT |
+                                      IXGBE_MAC_STATE_IN_USE);
+       hw->mac.ops.set_rar(hw, 0, adapter->mac_table[0].addr,
+                           adapter->mac_table[0].queue,
+                           IXGBE_RAH_AV);
+}
+
+int ixgbe_add_mac_filter(struct ixgbe_adapter *adapter, u8 *addr, u16 queue)
+{
+       struct ixgbe_hw *hw = &adapter->hw;
+       int i;
+
+       if (is_zero_ether_addr(addr))
+               return -EINVAL;
+
+       for (i = 0; i < hw->mac.num_rar_entries; i++) {
+               if (adapter->mac_table[i].state & IXGBE_MAC_STATE_IN_USE)
+                       continue;
+               adapter->mac_table[i].state |= (IXGBE_MAC_STATE_MODIFIED |
+                                               IXGBE_MAC_STATE_IN_USE);
+               ether_addr_copy(adapter->mac_table[i].addr, addr);
+               adapter->mac_table[i].queue = queue;
+               ixgbe_sync_mac_table(adapter);
+               return i;
+       }
+       return -ENOMEM;
+}
+
+int ixgbe_del_mac_filter(struct ixgbe_adapter *adapter, u8 *addr, u16 queue)
+{
+       /* search table for addr, if found, set to 0 and sync */
+       int i;
+       struct ixgbe_hw *hw = &adapter->hw;
+
+       if (is_zero_ether_addr(addr))
+               return -EINVAL;
+
+       for (i = 0; i < hw->mac.num_rar_entries; i++) {
+               if (ether_addr_equal(addr, adapter->mac_table[i].addr) &&
+                   adapter->mac_table[i].queue == queue) {
+                       adapter->mac_table[i].state |= IXGBE_MAC_STATE_MODIFIED;
+                       adapter->mac_table[i].state &= ~IXGBE_MAC_STATE_IN_USE;
+                       memset(adapter->mac_table[i].addr, 0, ETH_ALEN);
+                       adapter->mac_table[i].queue = 0;
+                       ixgbe_sync_mac_table(adapter);
+                       return 0;
+               }
+       }
+       return -ENOMEM;
+}
 /**
  * ixgbe_write_uc_addr_list - write unicast addresses to RAR table
  * @netdev: network interface device structure
@@ -3857,39 +4007,23 @@ static void ixgbe_restore_vlan(struct ixgbe_adapter *adapter)
  *                0 on no addresses written
  *                X on writing X addresses to the RAR table
  **/
-static int ixgbe_write_uc_addr_list(struct net_device *netdev)
+static int ixgbe_write_uc_addr_list(struct net_device *netdev, int vfn)
 {
        struct ixgbe_adapter *adapter = netdev_priv(netdev);
-       struct ixgbe_hw *hw = &adapter->hw;
-       unsigned int rar_entries = hw->mac.num_rar_entries - 1;
        int count = 0;
 
-       /* In SR-IOV/VMDQ modes significantly less RAR entries are available */
-       if (adapter->flags & IXGBE_FLAG_SRIOV_ENABLED)
-               rar_entries = IXGBE_MAX_PF_MACVLANS - 1;
-
        /* return ENOMEM indicating insufficient memory for addresses */
-       if (netdev_uc_count(netdev) > rar_entries)
+       if (netdev_uc_count(netdev) > ixgbe_available_rars(adapter))
                return -ENOMEM;
 
        if (!netdev_uc_empty(netdev)) {
                struct netdev_hw_addr *ha;
-               /* return error if we do not support writing to RAR table */
-               if (!hw->mac.ops.set_rar)
-                       return -ENOMEM;
-
                netdev_for_each_uc_addr(ha, netdev) {
-                       if (!rar_entries)
-                               break;
-                       hw->mac.ops.set_rar(hw, rar_entries--, ha->addr,
-                                           VMDQ_P(0), IXGBE_RAH_AV);
+                       ixgbe_del_mac_filter(adapter, ha->addr, vfn);
+                       ixgbe_add_mac_filter(adapter, ha->addr, vfn);
                        count++;
                }
        }
-       /* write the addresses in reverse order to avoid write combining */
-       for (; rar_entries > 0 ; rar_entries--)
-               hw->mac.ops.clear_rar(hw, rar_entries);
-
        return count;
 }
 
@@ -3907,11 +4041,12 @@ void ixgbe_set_rx_mode(struct net_device *netdev)
        struct ixgbe_adapter *adapter = netdev_priv(netdev);
        struct ixgbe_hw *hw = &adapter->hw;
        u32 fctrl, vmolr = IXGBE_VMOLR_BAM | IXGBE_VMOLR_AUPE;
+       u32 vlnctrl;
        int count;
 
        /* Check for Promiscuous and All Multicast modes */
-
        fctrl = IXGBE_READ_REG(hw, IXGBE_FCTRL);
+       vlnctrl = IXGBE_READ_REG(hw, IXGBE_VLNCTRL);
 
        /* set all bits that we expect to always be set */
        fctrl &= ~IXGBE_FCTRL_SBP; /* disable store-bad-packets */
@@ -3921,26 +4056,24 @@ void ixgbe_set_rx_mode(struct net_device *netdev)
 
        /* clear the bits we are changing the status of */
        fctrl &= ~(IXGBE_FCTRL_UPE | IXGBE_FCTRL_MPE);
-
+       vlnctrl &= ~(IXGBE_VLNCTRL_VFE | IXGBE_VLNCTRL_CFIEN);
        if (netdev->flags & IFF_PROMISC) {
                hw->addr_ctrl.user_set_promisc = true;
                fctrl |= (IXGBE_FCTRL_UPE | IXGBE_FCTRL_MPE);
-               vmolr |= (IXGBE_VMOLR_ROPE | IXGBE_VMOLR_MPE);
+               vmolr |= IXGBE_VMOLR_MPE;
                /* Only disable hardware filter vlans in promiscuous mode
                 * if SR-IOV and VMDQ are disabled - otherwise ensure
                 * that hardware VLAN filters remain enabled.
                 */
                if (!(adapter->flags & (IXGBE_FLAG_VMDQ_ENABLED |
                                        IXGBE_FLAG_SRIOV_ENABLED)))
-                       ixgbe_vlan_filter_disable(adapter);
-               else
-                       ixgbe_vlan_filter_enable(adapter);
+                       vlnctrl |= (IXGBE_VLNCTRL_VFE | IXGBE_VLNCTRL_CFIEN);
        } else {
                if (netdev->flags & IFF_ALLMULTI) {
                        fctrl |= IXGBE_FCTRL_MPE;
                        vmolr |= IXGBE_VMOLR_MPE;
                }
-               ixgbe_vlan_filter_enable(adapter);
+               vlnctrl |= IXGBE_VLNCTRL_VFE;
                hw->addr_ctrl.user_set_promisc = false;
        }
 
@@ -3949,7 +4082,7 @@ void ixgbe_set_rx_mode(struct net_device *netdev)
         * sufficient space to store all the addresses then enable
         * unicast promiscuous mode
         */
-       count = ixgbe_write_uc_addr_list(netdev);
+       count = ixgbe_write_uc_addr_list(netdev, VMDQ_P(0));
        if (count < 0) {
                fctrl |= IXGBE_FCTRL_UPE;
                vmolr |= IXGBE_VMOLR_ROPE;
@@ -3959,11 +4092,13 @@ void ixgbe_set_rx_mode(struct net_device *netdev)
         * then we should just turn on promiscuous mode so
         * that we can at least receive multicast traffic
         */
-       hw->mac.ops.update_mc_addr_list(hw, netdev);
-       vmolr |= IXGBE_VMOLR_ROMPE;
-
-       if (adapter->num_vfs)
-               ixgbe_restore_vf_multicasts(adapter);
+       count = ixgbe_write_mc_addr_list(netdev);
+       if (count < 0) {
+               fctrl |= IXGBE_FCTRL_MPE;
+               vmolr |= IXGBE_VMOLR_MPE;
+       } else if (count) {
+               vmolr |= IXGBE_VMOLR_ROMPE;
+       }
 
        if (hw->mac.type != ixgbe_mac_82598EB) {
                vmolr |= IXGBE_READ_REG(hw, IXGBE_VMOLR(VMDQ_P(0))) &
@@ -3984,6 +4119,7 @@ void ixgbe_set_rx_mode(struct net_device *netdev)
                /* NOTE:  VLAN filtering is disabled by setting PROMISC */
        }
 
+       IXGBE_WRITE_REG(hw, IXGBE_VLNCTRL, vlnctrl);
        IXGBE_WRITE_REG(hw, IXGBE_FCTRL, fctrl);
 
        if (netdev->features & NETIF_F_HW_VLAN_CTAG_RX)
@@ -4100,8 +4236,8 @@ static int ixgbe_hpbthresh(struct ixgbe_adapter *adapter, int pb)
            (tc < IXGBE_FCOE_JUMBO_FRAME_SIZE) &&
            (pb == ixgbe_fcoe_get_tc(adapter)))
                tc = IXGBE_FCOE_JUMBO_FRAME_SIZE;
-
 #endif
+
        /* Calculate delay value for device */
        switch (hw->mac.type) {
        case ixgbe_mac_X540:
@@ -4142,7 +4278,7 @@ static int ixgbe_hpbthresh(struct ixgbe_adapter *adapter, int pb)
  * @adapter: board private structure to calculate for
  * @pb: packet buffer to calculate
  */
-static int ixgbe_lpbthresh(struct ixgbe_adapter *adapter)
+static int ixgbe_lpbthresh(struct ixgbe_adapter *adapter, int pb)
 {
        struct ixgbe_hw *hw = &adapter->hw;
        struct net_device *dev = adapter->netdev;
@@ -4152,6 +4288,14 @@ static int ixgbe_lpbthresh(struct ixgbe_adapter *adapter)
        /* Calculate max LAN frame size */
        tc = dev->mtu + ETH_HLEN + ETH_FCS_LEN;
 
+#ifdef IXGBE_FCOE
+       /* FCoE traffic class uses FCOE jumbo frames */
+       if ((dev->features & NETIF_F_FCOE_MTU) &&
+           (tc < IXGBE_FCOE_JUMBO_FRAME_SIZE) &&
+           (pb == netdev_get_prio_tc_map(dev, adapter->fcoe.up)))
+               tc = IXGBE_FCOE_JUMBO_FRAME_SIZE;
+#endif
+
        /* Calculate delay value for device */
        switch (hw->mac.type) {
        case ixgbe_mac_X540:
@@ -4178,15 +4322,17 @@ static void ixgbe_pbthresh_setup(struct ixgbe_adapter *adapter)
        if (!num_tc)
                num_tc = 1;
 
-       hw->fc.low_water = ixgbe_lpbthresh(adapter);
-
        for (i = 0; i < num_tc; i++) {
                hw->fc.high_water[i] = ixgbe_hpbthresh(adapter, i);
+               hw->fc.low_water[i] = ixgbe_lpbthresh(adapter, i);
 
                /* Low water marks must not be larger than high water marks */
-               if (hw->fc.low_water > hw->fc.high_water[i])
-                       hw->fc.low_water = 0;
+               if (hw->fc.low_water[i] > hw->fc.high_water[i])
+                       hw->fc.low_water[i] = 0;
        }
+
+       for (; i < MAX_TRAFFIC_CLASS; i++)
+               hw->fc.high_water[i] = 0;
 }
 
 static void ixgbe_configure_pb(struct ixgbe_adapter *adapter)
@@ -4248,20 +4394,10 @@ static void ixgbe_macvlan_set_rx_mode(struct net_device *dev, unsigned int pool,
                vmolr |= IXGBE_VMOLR_ROMPE;
                hw->mac.ops.update_mc_addr_list(hw, dev);
        }
-       ixgbe_write_uc_addr_list(adapter->netdev);
+       ixgbe_write_uc_addr_list(adapter->netdev, pool);
        IXGBE_WRITE_REG(hw, IXGBE_VMOLR(pool), vmolr);
 }
 
-static void ixgbe_add_mac_filter(struct ixgbe_adapter *adapter,
-                                u8 *addr, u16 pool)
-{
-       struct ixgbe_hw *hw = &adapter->hw;
-       unsigned int entry;
-
-       entry = hw->mac.num_rar_entries - pool;
-       hw->mac.ops.set_rar(hw, entry, addr, VMDQ_P(pool), IXGBE_RAH_AV);
-}
-
 static void ixgbe_fwd_psrtype(struct ixgbe_fwd_adapter *vadapter)
 {
        struct ixgbe_adapter *adapter = vadapter->real_adapter;
@@ -4741,7 +4877,9 @@ void ixgbe_up(struct ixgbe_adapter *adapter)
 void ixgbe_reset(struct ixgbe_adapter *adapter)
 {
        struct ixgbe_hw *hw = &adapter->hw;
+       struct net_device *netdev = adapter->netdev;
        int err;
+       u8 old_addr[ETH_ALEN];
 
        if (ixgbe_removed(hw->hw_addr))
                return;
@@ -4777,9 +4915,10 @@ void ixgbe_reset(struct ixgbe_adapter *adapter)
        }
 
        clear_bit(__IXGBE_IN_SFP_INIT, &adapter->state);
-
-       /* reprogram the RAR[0] in case user changed it. */
-       hw->mac.ops.set_rar(hw, 0, hw->mac.addr, VMDQ_P(0), IXGBE_RAH_AV);
+       /* do not flush user set addresses */
+       memcpy(old_addr, &adapter->mac_table[0].addr, netdev->addr_len);
+       ixgbe_flush_sw_mac_table(adapter);
+       ixgbe_mac_set_default_filter(adapter, old_addr);
 
        /* update SAN MAC vmdq pool selection */
        if (hw->mac.san_mac_rar_index)
@@ -5025,6 +5164,10 @@ static int ixgbe_sw_init(struct ixgbe_adapter *adapter)
 #endif /* CONFIG_IXGBE_DCB */
 #endif /* IXGBE_FCOE */
 
+       adapter->mac_table = kzalloc(sizeof(struct ixgbe_mac_addr) *
+                                    hw->mac.num_rar_entries,
+                                    GFP_ATOMIC);
+
        /* Set MAC specific capability flags and exceptions */
        switch (hw->mac.type) {
        case ixgbe_mac_82598EB:
@@ -7171,16 +7314,17 @@ static int ixgbe_set_mac(struct net_device *netdev, void *p)
        struct ixgbe_adapter *adapter = netdev_priv(netdev);
        struct ixgbe_hw *hw = &adapter->hw;
        struct sockaddr *addr = p;
+       int ret;
 
        if (!is_valid_ether_addr(addr->sa_data))
                return -EADDRNOTAVAIL;
 
+       ixgbe_del_mac_filter(adapter, hw->mac.addr, VMDQ_P(0));
        memcpy(netdev->dev_addr, addr->sa_data, netdev->addr_len);
        memcpy(hw->mac.addr, addr->sa_data, netdev->addr_len);
 
-       hw->mac.ops.set_rar(hw, 0, hw->mac.addr, VMDQ_P(0), IXGBE_RAH_AV);
-
-       return 0;
+       ret = ixgbe_add_mac_filter(adapter, hw->mac.addr, VMDQ_P(0));
+       return ret > 0 ? 0 : ret;
 }
 
 static int
@@ -8186,6 +8330,8 @@ skip_sriov:
                goto err_sw_init;
        }
 
+       ixgbe_mac_set_default_filter(adapter, hw->mac.perm_addr);
+
        setup_timer(&adapter->service_timer, &ixgbe_service_timer,
                    (unsigned long) adapter);
 
@@ -8318,6 +8464,7 @@ err_sw_init:
        ixgbe_disable_sriov(adapter);
        adapter->flags2 &= ~IXGBE_FLAG2_SEARCH_FOR_SFP;
        iounmap(adapter->io_addr);
+       kfree(adapter->mac_table);
 err_ioremap:
        free_netdev(netdev);
 err_alloc_etherdev:
@@ -8391,6 +8538,7 @@ static void ixgbe_remove(struct pci_dev *pdev)
 
        e_dev_info("complete\n");
 
+       kfree(adapter->mac_table);
        free_netdev(netdev);
 
        pci_disable_pcie_error_reporting(pdev);
index 23f765263f12479822654a9af55263a8db233bd7..a76af8e28a04be16386c444cc947581638f3befa 100644 (file)
@@ -536,7 +536,7 @@ s32 ixgbe_setup_phy_link_generic(struct ixgbe_hw *hw)
 
        if (time_out == max_time_out) {
                status = IXGBE_ERR_LINK_SETUP;
-               hw_dbg(hw, "ixgbe_setup_phy_link_generic: time out");
+               hw_dbg(hw, "ixgbe_setup_phy_link_generic: time out\n");
        }
 
        return status;
@@ -745,7 +745,7 @@ s32 ixgbe_setup_phy_link_tnx(struct ixgbe_hw *hw)
 
        if (time_out == max_time_out) {
                status = IXGBE_ERR_LINK_SETUP;
-               hw_dbg(hw, "ixgbe_setup_phy_link_tnx: time out");
+               hw_dbg(hw, "ixgbe_setup_phy_link_tnx: time out\n");
        }
 
        return status;
@@ -1175,7 +1175,7 @@ s32 ixgbe_identify_sfp_module_generic(struct ixgbe_hw *hw)
                                status = 0;
                        } else {
                                if (hw->allow_unsupported_sfp) {
-                                       e_warn(drv, "WARNING: Intel (R) Network Connections are quality tested using Intel (R) Ethernet Optics.  Using untested modules is not supported and may cause unstable operation or damage to the module or the adapter.  Intel Corporation is not responsible for any harm caused by using untested modules.");
+                                       e_warn(drv, "WARNING: Intel (R) Network Connections are quality tested using Intel (R) Ethernet Optics.  Using untested modules is not supported and may cause unstable operation or damage to the module or the adapter.  Intel Corporation is not responsible for any harm caused by using untested modules.\n");
                                        status = 0;
                                } else {
                                        hw_dbg(hw,
index 63515a6f67fae073b40bad8c58abc55a6c238517..8902ae68345770ce5f28ba2ab91ee73bab3c7ff3 100644 (file)
@@ -435,10 +435,8 @@ void ixgbe_ptp_overflow_check(struct ixgbe_adapter *adapter)
 void ixgbe_ptp_rx_hang(struct ixgbe_adapter *adapter)
 {
        struct ixgbe_hw *hw = &adapter->hw;
-       struct ixgbe_ring *rx_ring;
        u32 tsyncrxctl = IXGBE_READ_REG(hw, IXGBE_TSYNCRXCTL);
        unsigned long rx_event;
-       int n;
 
        /* if we don't have a valid timestamp in the registers, just update the
         * timeout counter and exit
@@ -450,18 +448,15 @@ void ixgbe_ptp_rx_hang(struct ixgbe_adapter *adapter)
 
        /* determine the most recent watchdog or rx_timestamp event */
        rx_event = adapter->last_rx_ptp_check;
-       for (n = 0; n < adapter->num_rx_queues; n++) {
-               rx_ring = adapter->rx_ring[n];
-               if (time_after(rx_ring->last_rx_timestamp, rx_event))
-                       rx_event = rx_ring->last_rx_timestamp;
-       }
+       if (time_after(adapter->last_rx_timestamp, rx_event))
+               rx_event = adapter->last_rx_timestamp;
 
        /* only need to read the high RXSTMP register to clear the lock */
        if (time_is_before_jiffies(rx_event + 5*HZ)) {
                IXGBE_READ_REG(hw, IXGBE_RXSTMPH);
                adapter->last_rx_ptp_check = jiffies;
 
-               e_warn(drv, "clearing RX Timestamp hang");
+               e_warn(drv, "clearing RX Timestamp hang\n");
        }
 }
 
@@ -517,7 +512,7 @@ static void ixgbe_ptp_tx_hwtstamp_work(struct work_struct *work)
                dev_kfree_skb_any(adapter->ptp_tx_skb);
                adapter->ptp_tx_skb = NULL;
                clear_bit_unlock(__IXGBE_PTP_TX_IN_PROGRESS, &adapter->state);
-               e_warn(drv, "clearing Tx Timestamp hang");
+               e_warn(drv, "clearing Tx Timestamp hang\n");
                return;
        }
 
@@ -530,35 +525,22 @@ static void ixgbe_ptp_tx_hwtstamp_work(struct work_struct *work)
 }
 
 /**
- * __ixgbe_ptp_rx_hwtstamp - utility function which checks for RX time stamp
- * @q_vector: structure containing interrupt and ring information
+ * ixgbe_ptp_rx_hwtstamp - utility function which checks for RX time stamp
+ * @adapter: pointer to adapter struct
  * @skb: particular skb to send timestamp with
  *
  * if the timestamp is valid, we convert it into the timecounter ns
  * value, then store that result into the shhwtstamps structure which
  * is passed up the network stack
  */
-void __ixgbe_ptp_rx_hwtstamp(struct ixgbe_q_vector *q_vector,
-                            struct sk_buff *skb)
+void ixgbe_ptp_rx_hwtstamp(struct ixgbe_adapter *adapter, struct sk_buff *skb)
 {
-       struct ixgbe_adapter *adapter;
-       struct ixgbe_hw *hw;
+       struct ixgbe_hw *hw = &adapter->hw;
        struct skb_shared_hwtstamps *shhwtstamps;
        u64 regval = 0, ns;
        u32 tsyncrxctl;
        unsigned long flags;
 
-       /* we cannot process timestamps on a ring without a q_vector */
-       if (!q_vector || !q_vector->adapter)
-               return;
-
-       adapter = q_vector->adapter;
-       hw = &adapter->hw;
-
-       /*
-        * Read the tsyncrxctl register afterwards in order to prevent taking an
-        * I/O hit on every packet.
-        */
        tsyncrxctl = IXGBE_READ_REG(hw, IXGBE_TSYNCRXCTL);
        if (!(tsyncrxctl & IXGBE_TSYNCRXCTL_VALID))
                return;
@@ -566,13 +548,17 @@ void __ixgbe_ptp_rx_hwtstamp(struct ixgbe_q_vector *q_vector,
        regval |= (u64)IXGBE_READ_REG(hw, IXGBE_RXSTMPL);
        regval |= (u64)IXGBE_READ_REG(hw, IXGBE_RXSTMPH) << 32;
 
-
        spin_lock_irqsave(&adapter->tmreg_lock, flags);
        ns = timecounter_cyc2time(&adapter->tc, regval);
        spin_unlock_irqrestore(&adapter->tmreg_lock, flags);
 
        shhwtstamps = skb_hwtstamps(skb);
        shhwtstamps->hwtstamp = ns_to_ktime(ns);
+
+       /* Update the last_rx_timestamp timer in order to enable watchdog check
+        * for error case of latched timestamp on a dropped packet.
+        */
+       adapter->last_rx_timestamp = jiffies;
 }
 
 int ixgbe_ptp_get_ts_config(struct ixgbe_adapter *adapter, struct ifreq *ifr)
index e6c68d396c992fffb329a1cca4daadb47169faab..a01417c066208e147cafd9134eed78efb7006d28 100644 (file)
@@ -72,8 +72,6 @@ static int __ixgbe_enable_sriov(struct ixgbe_adapter *adapter)
                for (i = 0; i < num_vf_macvlans; i++) {
                        mv_list->vf = -1;
                        mv_list->free = true;
-                       mv_list->rar_entry = hw->mac.num_rar_entries -
-                               (i + adapter->num_vfs + 1);
                        list_add(&mv_list->l, &adapter->vf_mvs.l);
                        mv_list++;
                }
@@ -327,6 +325,7 @@ static int ixgbe_set_vf_multicasts(struct ixgbe_adapter *adapter,
        u32 vector_bit;
        u32 vector_reg;
        u32 mta_reg;
+       u32 vmolr = IXGBE_READ_REG(hw, IXGBE_VMOLR(vf));
 
        /* only so many hash values supported */
        entries = min(entries, IXGBE_MAX_VF_MC_ENTRIES);
@@ -353,25 +352,13 @@ static int ixgbe_set_vf_multicasts(struct ixgbe_adapter *adapter,
                mta_reg |= (1 << vector_bit);
                IXGBE_WRITE_REG(hw, IXGBE_MTA(vector_reg), mta_reg);
        }
+       vmolr |= IXGBE_VMOLR_ROMPE;
+       IXGBE_WRITE_REG(hw, IXGBE_VMOLR(vf), vmolr);
 
        return 0;
 }
 
-static void ixgbe_restore_vf_macvlans(struct ixgbe_adapter *adapter)
-{
-       struct ixgbe_hw *hw = &adapter->hw;
-       struct list_head *pos;
-       struct vf_macvlans *entry;
-
-       list_for_each(pos, &adapter->vf_mvs.l) {
-               entry = list_entry(pos, struct vf_macvlans, l);
-               if (!entry->free)
-                       hw->mac.ops.set_rar(hw, entry->rar_entry,
-                                           entry->vf_macvlan,
-                                           entry->vf, IXGBE_RAH_AV);
-       }
-}
-
+#ifdef CONFIG_PCI_IOV
 void ixgbe_restore_vf_multicasts(struct ixgbe_adapter *adapter)
 {
        struct ixgbe_hw *hw = &adapter->hw;
@@ -382,6 +369,7 @@ void ixgbe_restore_vf_multicasts(struct ixgbe_adapter *adapter)
        u32 mta_reg;
 
        for (i = 0; i < adapter->num_vfs; i++) {
+               u32 vmolr = IXGBE_READ_REG(hw, IXGBE_VMOLR(i));
                vfinfo = &adapter->vfinfo[i];
                for (j = 0; j < vfinfo->num_vf_mc_hashes; j++) {
                        hw->addr_ctrl.mta_in_use++;
@@ -391,11 +379,18 @@ void ixgbe_restore_vf_multicasts(struct ixgbe_adapter *adapter)
                        mta_reg |= (1 << vector_bit);
                        IXGBE_WRITE_REG(hw, IXGBE_MTA(vector_reg), mta_reg);
                }
+
+               if (vfinfo->num_vf_mc_hashes)
+                       vmolr |= IXGBE_VMOLR_ROMPE;
+               else
+                       vmolr &= ~IXGBE_VMOLR_ROMPE;
+               IXGBE_WRITE_REG(hw, IXGBE_VMOLR(i), vmolr);
        }
 
        /* Restore any VF macvlans */
-       ixgbe_restore_vf_macvlans(adapter);
+       ixgbe_full_sync_mac_table(adapter);
 }
+#endif
 
 static int ixgbe_set_vf_vlan(struct ixgbe_adapter *adapter, int add, int vid,
                             u32 vf)
@@ -495,8 +490,7 @@ static s32 ixgbe_set_vf_lpe(struct ixgbe_adapter *adapter, u32 *msgbuf, u32 vf)
 static void ixgbe_set_vmolr(struct ixgbe_hw *hw, u32 vf, bool aupe)
 {
        u32 vmolr = IXGBE_READ_REG(hw, IXGBE_VMOLR(vf));
-       vmolr |= (IXGBE_VMOLR_ROMPE |
-                 IXGBE_VMOLR_BAM);
+       vmolr |= IXGBE_VMOLR_BAM;
        if (aupe)
                vmolr |= IXGBE_VMOLR_AUPE;
        else
@@ -514,7 +508,6 @@ static inline void ixgbe_vf_reset_event(struct ixgbe_adapter *adapter, u32 vf)
 {
        struct ixgbe_hw *hw = &adapter->hw;
        struct vf_data_storage *vfinfo = &adapter->vfinfo[vf];
-       int rar_entry = hw->mac.num_rar_entries - (vf + 1);
        u8 num_tcs = netdev_get_num_tc(adapter->netdev);
 
        /* add PF assigned VLAN or VLAN 0 */
@@ -544,7 +537,7 @@ static inline void ixgbe_vf_reset_event(struct ixgbe_adapter *adapter, u32 vf)
        /* Flush and reset the mta with the new values */
        ixgbe_set_rx_mode(adapter->netdev);
 
-       hw->mac.ops.clear_rar(hw, rar_entry);
+       ixgbe_del_mac_filter(adapter, adapter->vfinfo[vf].vf_mac_addresses, vf);
 
        /* reset VF api back to unknown */
        adapter->vfinfo[vf].vf_api = ixgbe_mbox_api_10;
@@ -553,11 +546,9 @@ static inline void ixgbe_vf_reset_event(struct ixgbe_adapter *adapter, u32 vf)
 static int ixgbe_set_vf_mac(struct ixgbe_adapter *adapter,
                            int vf, unsigned char *mac_addr)
 {
-       struct ixgbe_hw *hw = &adapter->hw;
-       int rar_entry = hw->mac.num_rar_entries - (vf + 1);
-
+       ixgbe_del_mac_filter(adapter, adapter->vfinfo[vf].vf_mac_addresses, vf);
        memcpy(adapter->vfinfo[vf].vf_mac_addresses, mac_addr, ETH_ALEN);
-       hw->mac.ops.set_rar(hw, rar_entry, mac_addr, vf, IXGBE_RAH_AV);
+       ixgbe_add_mac_filter(adapter, adapter->vfinfo[vf].vf_mac_addresses, vf);
 
        return 0;
 }
@@ -565,7 +556,6 @@ static int ixgbe_set_vf_mac(struct ixgbe_adapter *adapter,
 static int ixgbe_set_vf_macvlan(struct ixgbe_adapter *adapter,
                                int vf, int index, unsigned char *mac_addr)
 {
-       struct ixgbe_hw *hw = &adapter->hw;
        struct list_head *pos;
        struct vf_macvlans *entry;
 
@@ -576,7 +566,8 @@ static int ixgbe_set_vf_macvlan(struct ixgbe_adapter *adapter,
                                entry->vf = -1;
                                entry->free = true;
                                entry->is_macvlan = false;
-                               hw->mac.ops.clear_rar(hw, entry->rar_entry);
+                               ixgbe_del_mac_filter(adapter,
+                                                    entry->vf_macvlan, vf);
                        }
                }
        }
@@ -612,7 +603,7 @@ static int ixgbe_set_vf_macvlan(struct ixgbe_adapter *adapter,
        entry->vf = vf;
        memcpy(entry->vf_macvlan, mac_addr, ETH_ALEN);
 
-       hw->mac.ops.set_rar(hw, entry->rar_entry, mac_addr, vf, IXGBE_RAH_AV);
+       ixgbe_add_mac_filter(adapter, mac_addr, vf);
 
        return 0;
 }
index 139eaddfb2ed5c8c14a3891137a313b4fcaa3a6a..cea64014760497597710a0697b933771ac75f3a2 100644 (file)
@@ -34,7 +34,9 @@
  */
 #define IXGBE_MAX_VFS_DRV_LIMIT  (IXGBE_MAX_VF_FUNCTIONS - 1)
 
+#ifdef CONFIG_PCI_IOV
 void ixgbe_restore_vf_multicasts(struct ixgbe_adapter *adapter);
+#endif
 void ixgbe_msg_task(struct ixgbe_adapter *adapter);
 int ixgbe_vf_configuration(struct pci_dev *pdev, unsigned int event_mask);
 void ixgbe_disable_tx_rx(struct ixgbe_adapter *adapter);
index 8a6ff2423f076974d1c3c408b97c497d00bdc277..551d6089a4d3eb802e64f010f7abff5dc9ae1a94 100644 (file)
@@ -2746,7 +2746,7 @@ struct ixgbe_bus_info {
 /* Flow control parameters */
 struct ixgbe_fc_info {
        u32 high_water[MAX_TRAFFIC_CLASS]; /* Flow Control High-water */
-       u32 low_water; /* Flow Control Low-water */
+       u32 low_water[MAX_TRAFFIC_CLASS]; /* Flow Control Low-water */
        u16 pause_time; /* Flow Control Pause timer */
        bool send_xon; /* Flow control send XON */
        bool strict_ieee; /* Strict IEEE mode */
index 1baecb60f0657e20a4500380ece36a72f908c3d9..a757f07347193efa8fcfd47705c614d1bc854805 100644 (file)
@@ -813,5 +813,5 @@ static const struct ethtool_ops ixgbevf_ethtool_ops = {
 
 void ixgbevf_set_ethtool_ops(struct net_device *netdev)
 {
-       SET_ETHTOOL_OPS(netdev, &ixgbevf_ethtool_ops);
+       netdev->ethtool_ops = &ixgbevf_ethtool_ops;
 }
index d0799e8e31e4ea08e5dc89ec64b58c76bd5840a3..eacce3a2e9eca0f2336a1a2ab2eec732558dbb59 100644 (file)
@@ -85,7 +85,7 @@ static DEFINE_PCI_DEVICE_TABLE(ixgbevf_pci_tbl) = {
 MODULE_DEVICE_TABLE(pci, ixgbevf_pci_tbl);
 
 MODULE_AUTHOR("Intel Corporation, <linux.nics@intel.com>");
-MODULE_DESCRIPTION("Intel(R) 82599 Virtual Function Driver");
+MODULE_DESCRIPTION("Intel(R) 10 Gigabit Virtual Function Network Driver");
 MODULE_LICENSE("GPL");
 MODULE_VERSION(DRV_VERSION);
 
index b0c6050479eb460ae306cccaa93926f738e64c2e..6e664d9038d60df53b36b3869897cb49582fca16 100644 (file)
@@ -1988,7 +1988,7 @@ jme_alloc_txdesc(struct jme_adapter *jme,
        return idx;
 }
 
-static void
+static int
 jme_fill_tx_map(struct pci_dev *pdev,
                struct txdesc *txdesc,
                struct jme_buffer_info *txbi,
@@ -2005,6 +2005,9 @@ jme_fill_tx_map(struct pci_dev *pdev,
                                len,
                                PCI_DMA_TODEVICE);
 
+       if (unlikely(pci_dma_mapping_error(pdev, dmaaddr)))
+               return -EINVAL;
+
        pci_dma_sync_single_for_device(pdev,
                                       dmaaddr,
                                       len,
@@ -2021,9 +2024,30 @@ jme_fill_tx_map(struct pci_dev *pdev,
 
        txbi->mapping = dmaaddr;
        txbi->len = len;
+       return 0;
 }
 
-static void
+static void jme_drop_tx_map(struct jme_adapter *jme, int startidx, int endidx)
+{
+       struct jme_ring *txring = &(jme->txring[0]);
+       struct jme_buffer_info *txbi = txring->bufinf, *ctxbi;
+       int mask = jme->tx_ring_mask;
+       int j;
+
+       for (j = startidx ; j < endidx ; ++j) {
+               ctxbi = txbi + ((startidx + j + 2) & (mask));
+               pci_unmap_page(jme->pdev,
+                               ctxbi->mapping,
+                               ctxbi->len,
+                               PCI_DMA_TODEVICE);
+
+                               ctxbi->mapping = 0;
+                               ctxbi->len = 0;
+       }
+
+}
+
+static int
 jme_map_tx_skb(struct jme_adapter *jme, struct sk_buff *skb, int idx)
 {
        struct jme_ring *txring = &(jme->txring[0]);
@@ -2034,25 +2058,37 @@ jme_map_tx_skb(struct jme_adapter *jme, struct sk_buff *skb, int idx)
        int mask = jme->tx_ring_mask;
        const struct skb_frag_struct *frag;
        u32 len;
+       int ret = 0;
 
        for (i = 0 ; i < nr_frags ; ++i) {
                frag = &skb_shinfo(skb)->frags[i];
                ctxdesc = txdesc + ((idx + i + 2) & (mask));
                ctxbi = txbi + ((idx + i + 2) & (mask));
 
-               jme_fill_tx_map(jme->pdev, ctxdesc, ctxbi,
+               ret = jme_fill_tx_map(jme->pdev, ctxdesc, ctxbi,
                                skb_frag_page(frag),
                                frag->page_offset, skb_frag_size(frag), hidma);
+               if (ret) {
+                       jme_drop_tx_map(jme, idx, idx+i);
+                       goto out;
+               }
+
        }
 
        len = skb_is_nonlinear(skb) ? skb_headlen(skb) : skb->len;
        ctxdesc = txdesc + ((idx + 1) & (mask));
        ctxbi = txbi + ((idx + 1) & (mask));
-       jme_fill_tx_map(jme->pdev, ctxdesc, ctxbi, virt_to_page(skb->data),
+       ret = jme_fill_tx_map(jme->pdev, ctxdesc, ctxbi, virt_to_page(skb->data),
                        offset_in_page(skb->data), len, hidma);
+       if (ret)
+               jme_drop_tx_map(jme, idx, idx+i);
+
+out:
+       return ret;
 
 }
 
+
 static int
 jme_tx_tso(struct sk_buff *skb, __le16 *mss, u8 *flags)
 {
@@ -2131,6 +2167,7 @@ jme_fill_tx_desc(struct jme_adapter *jme, struct sk_buff *skb, int idx)
        struct txdesc *txdesc;
        struct jme_buffer_info *txbi;
        u8 flags;
+       int ret = 0;
 
        txdesc = (struct txdesc *)txring->desc + idx;
        txbi = txring->bufinf + idx;
@@ -2155,7 +2192,10 @@ jme_fill_tx_desc(struct jme_adapter *jme, struct sk_buff *skb, int idx)
        if (jme_tx_tso(skb, &txdesc->desc1.mss, &flags))
                jme_tx_csum(jme, skb, &flags);
        jme_tx_vlan(skb, &txdesc->desc1.vlan, &flags);
-       jme_map_tx_skb(jme, skb, idx);
+       ret = jme_map_tx_skb(jme, skb, idx);
+       if (ret)
+               return ret;
+
        txdesc->desc1.flags = flags;
        /*
         * Set tx buffer info after telling NIC to send
@@ -2228,7 +2268,8 @@ jme_start_xmit(struct sk_buff *skb, struct net_device *netdev)
                return NETDEV_TX_BUSY;
        }
 
-       jme_fill_tx_desc(jme, skb, idx);
+       if (jme_fill_tx_desc(jme, skb, idx))
+               return NETDEV_TX_BUSY;
 
        jwrite32(jme, JME_TXCS, jme->reg_txcs |
                                TXCS_SELECT_QUEUE0 |
index b7b8d74c22d9c6f7e7f9aaa7c9211722ec5929fd..3b0f818a4f5c2e82f415cde0d0b7bc4437450da4 100644 (file)
@@ -42,6 +42,7 @@
 #include <linux/dma-mapping.h>
 #include <linux/in.h>
 #include <linux/ip.h>
+#include <net/tso.h>
 #include <linux/tcp.h>
 #include <linux/udp.h>
 #include <linux/etherdevice.h>
@@ -179,9 +180,10 @@ static char mv643xx_eth_driver_version[] = "1.4";
  * Misc definitions.
  */
 #define DEFAULT_RX_QUEUE_SIZE  128
-#define DEFAULT_TX_QUEUE_SIZE  256
+#define DEFAULT_TX_QUEUE_SIZE  512
 #define SKB_DMA_REALIGN                ((PAGE_SIZE - NET_SKB_PAD) % SMP_CACHE_BYTES)
 
+#define TSO_HEADER_SIZE                128
 
 /*
  * RX/TX descriptors.
@@ -250,6 +252,7 @@ struct tx_desc {
 #define GEN_TCP_UDP_CHECKSUM           0x00020000
 #define UDP_FRAME                      0x00010000
 #define MAC_HDR_EXTRA_4_BYTES          0x00008000
+#define GEN_TCP_UDP_CHK_FULL           0x00000400
 #define MAC_HDR_EXTRA_8_BYTES          0x00000200
 
 #define TX_IHL_SHIFT                   11
@@ -345,6 +348,9 @@ struct tx_queue {
        int tx_curr_desc;
        int tx_used_desc;
 
+       char *tso_hdrs;
+       dma_addr_t tso_hdrs_dma;
+
        struct tx_desc *tx_desc_area;
        dma_addr_t tx_desc_dma;
        int tx_desc_area_size;
@@ -661,6 +667,198 @@ static inline unsigned int has_tiny_unaligned_frags(struct sk_buff *skb)
        return 0;
 }
 
+static inline __be16 sum16_as_be(__sum16 sum)
+{
+       return (__force __be16)sum;
+}
+
+static int skb_tx_csum(struct mv643xx_eth_private *mp, struct sk_buff *skb,
+                      u16 *l4i_chk, u32 *command, int length)
+{
+       int ret;
+       u32 cmd = 0;
+
+       if (skb->ip_summed == CHECKSUM_PARTIAL) {
+               int hdr_len;
+               int tag_bytes;
+
+               BUG_ON(skb->protocol != htons(ETH_P_IP) &&
+                      skb->protocol != htons(ETH_P_8021Q));
+
+               hdr_len = (void *)ip_hdr(skb) - (void *)skb->data;
+               tag_bytes = hdr_len - ETH_HLEN;
+
+               if (length - hdr_len > mp->shared->tx_csum_limit ||
+                   unlikely(tag_bytes & ~12)) {
+                       ret = skb_checksum_help(skb);
+                       if (!ret)
+                               goto no_csum;
+                       return ret;
+               }
+
+               if (tag_bytes & 4)
+                       cmd |= MAC_HDR_EXTRA_4_BYTES;
+               if (tag_bytes & 8)
+                       cmd |= MAC_HDR_EXTRA_8_BYTES;
+
+               cmd |= GEN_TCP_UDP_CHECKSUM | GEN_TCP_UDP_CHK_FULL |
+                          GEN_IP_V4_CHECKSUM   |
+                          ip_hdr(skb)->ihl << TX_IHL_SHIFT;
+
+               /* TODO: Revisit this. With the usage of GEN_TCP_UDP_CHK_FULL
+                * it seems we don't need to pass the initial checksum. */
+               switch (ip_hdr(skb)->protocol) {
+               case IPPROTO_UDP:
+                       cmd |= UDP_FRAME;
+                       *l4i_chk = 0;
+                       break;
+               case IPPROTO_TCP:
+                       *l4i_chk = 0;
+                       break;
+               default:
+                       WARN(1, "protocol not supported");
+               }
+       } else {
+no_csum:
+               /* Errata BTS #50, IHL must be 5 if no HW checksum */
+               cmd |= 5 << TX_IHL_SHIFT;
+       }
+       *command = cmd;
+       return 0;
+}
+
+static inline int
+txq_put_data_tso(struct net_device *dev, struct tx_queue *txq,
+                struct sk_buff *skb, char *data, int length,
+                bool last_tcp, bool is_last)
+{
+       int tx_index;
+       u32 cmd_sts;
+       struct tx_desc *desc;
+
+       tx_index = txq->tx_curr_desc++;
+       if (txq->tx_curr_desc == txq->tx_ring_size)
+               txq->tx_curr_desc = 0;
+       desc = &txq->tx_desc_area[tx_index];
+
+       desc->l4i_chk = 0;
+       desc->byte_cnt = length;
+       desc->buf_ptr = dma_map_single(dev->dev.parent, data,
+                                      length, DMA_TO_DEVICE);
+       if (unlikely(dma_mapping_error(dev->dev.parent, desc->buf_ptr))) {
+               WARN(1, "dma_map_single failed!\n");
+               return -ENOMEM;
+       }
+
+       cmd_sts = BUFFER_OWNED_BY_DMA;
+       if (last_tcp) {
+               /* last descriptor in the TCP packet */
+               cmd_sts |= ZERO_PADDING | TX_LAST_DESC;
+               /* last descriptor in SKB */
+               if (is_last)
+                       cmd_sts |= TX_ENABLE_INTERRUPT;
+       }
+       desc->cmd_sts = cmd_sts;
+       return 0;
+}
+
+static inline void
+txq_put_hdr_tso(struct sk_buff *skb, struct tx_queue *txq, int length)
+{
+       struct mv643xx_eth_private *mp = txq_to_mp(txq);
+       int hdr_len = skb_transport_offset(skb) + tcp_hdrlen(skb);
+       int tx_index;
+       struct tx_desc *desc;
+       int ret;
+       u32 cmd_csum = 0;
+       u16 l4i_chk = 0;
+
+       tx_index = txq->tx_curr_desc;
+       desc = &txq->tx_desc_area[tx_index];
+
+       ret = skb_tx_csum(mp, skb, &l4i_chk, &cmd_csum, length);
+       if (ret)
+               WARN(1, "failed to prepare checksum!");
+
+       /* Should we set this? Can't use the value from skb_tx_csum()
+        * as it's not the correct initial L4 checksum to use. */
+       desc->l4i_chk = 0;
+
+       desc->byte_cnt = hdr_len;
+       desc->buf_ptr = txq->tso_hdrs_dma +
+                       txq->tx_curr_desc * TSO_HEADER_SIZE;
+       desc->cmd_sts = cmd_csum | BUFFER_OWNED_BY_DMA  | TX_FIRST_DESC |
+                                  GEN_CRC;
+
+       txq->tx_curr_desc++;
+       if (txq->tx_curr_desc == txq->tx_ring_size)
+               txq->tx_curr_desc = 0;
+}
+
+static int txq_submit_tso(struct tx_queue *txq, struct sk_buff *skb,
+                         struct net_device *dev)
+{
+       struct mv643xx_eth_private *mp = txq_to_mp(txq);
+       int total_len, data_left, ret;
+       int desc_count = 0;
+       struct tso_t tso;
+       int hdr_len = skb_transport_offset(skb) + tcp_hdrlen(skb);
+
+       /* Count needed descriptors */
+       if ((txq->tx_desc_count + tso_count_descs(skb)) >= txq->tx_ring_size) {
+               netdev_dbg(dev, "not enough descriptors for TSO!\n");
+               return -EBUSY;
+       }
+
+       /* Initialize the TSO handler, and prepare the first payload */
+       tso_start(skb, &tso);
+
+       total_len = skb->len - hdr_len;
+       while (total_len > 0) {
+               char *hdr;
+
+               data_left = min_t(int, skb_shinfo(skb)->gso_size, total_len);
+               total_len -= data_left;
+               desc_count++;
+
+               /* prepare packet headers: MAC + IP + TCP */
+               hdr = txq->tso_hdrs + txq->tx_curr_desc * TSO_HEADER_SIZE;
+               tso_build_hdr(skb, hdr, &tso, data_left, total_len == 0);
+               txq_put_hdr_tso(skb, txq, data_left);
+
+               while (data_left > 0) {
+                       int size;
+                       desc_count++;
+
+                       size = min_t(int, tso.size, data_left);
+                       ret = txq_put_data_tso(dev, txq, skb, tso.data, size,
+                                              size == data_left,
+                                              total_len == 0);
+                       if (ret)
+                               goto err_release;
+                       data_left -= size;
+                       tso_build_data(skb, &tso, size);
+               }
+       }
+
+       __skb_queue_tail(&txq->tx_skb, skb);
+       skb_tx_timestamp(skb);
+
+       /* clear TX_END status */
+       mp->work_tx_end &= ~(1 << txq->index);
+
+       /* ensure all descriptors are written before poking hardware */
+       wmb();
+       txq_enable(txq);
+       txq->tx_desc_count += desc_count;
+       return 0;
+err_release:
+       /* TODO: Release all used data descriptors; header descriptors must not
+        * be DMA-unmapped.
+        */
+       return ret;
+}
+
 static void txq_submit_frag_skb(struct tx_queue *txq, struct sk_buff *skb)
 {
        struct mv643xx_eth_private *mp = txq_to_mp(txq);
@@ -671,8 +869,10 @@ static void txq_submit_frag_skb(struct tx_queue *txq, struct sk_buff *skb)
                skb_frag_t *this_frag;
                int tx_index;
                struct tx_desc *desc;
+               void *addr;
 
                this_frag = &skb_shinfo(skb)->frags[frag];
+               addr = page_address(this_frag->page.p) + this_frag->page_offset;
                tx_index = txq->tx_curr_desc++;
                if (txq->tx_curr_desc == txq->tx_ring_size)
                        txq->tx_curr_desc = 0;
@@ -692,18 +892,11 @@ static void txq_submit_frag_skb(struct tx_queue *txq, struct sk_buff *skb)
 
                desc->l4i_chk = 0;
                desc->byte_cnt = skb_frag_size(this_frag);
-               desc->buf_ptr = skb_frag_dma_map(mp->dev->dev.parent,
-                                                this_frag, 0,
-                                                skb_frag_size(this_frag),
-                                                DMA_TO_DEVICE);
+               desc->buf_ptr = dma_map_single(mp->dev->dev.parent, addr,
+                                              desc->byte_cnt, DMA_TO_DEVICE);
        }
 }
 
-static inline __be16 sum16_as_be(__sum16 sum)
-{
-       return (__force __be16)sum;
-}
-
 static int txq_submit_skb(struct tx_queue *txq, struct sk_buff *skb)
 {
        struct mv643xx_eth_private *mp = txq_to_mp(txq);
@@ -712,53 +905,17 @@ static int txq_submit_skb(struct tx_queue *txq, struct sk_buff *skb)
        struct tx_desc *desc;
        u32 cmd_sts;
        u16 l4i_chk;
-       int length;
+       int length, ret;
 
-       cmd_sts = TX_FIRST_DESC | GEN_CRC | BUFFER_OWNED_BY_DMA;
+       cmd_sts = 0;
        l4i_chk = 0;
 
-       if (skb->ip_summed == CHECKSUM_PARTIAL) {
-               int hdr_len;
-               int tag_bytes;
-
-               BUG_ON(skb->protocol != htons(ETH_P_IP) &&
-                      skb->protocol != htons(ETH_P_8021Q));
-
-               hdr_len = (void *)ip_hdr(skb) - (void *)skb->data;
-               tag_bytes = hdr_len - ETH_HLEN;
-               if (skb->len - hdr_len > mp->shared->tx_csum_limit ||
-                   unlikely(tag_bytes & ~12)) {
-                       if (skb_checksum_help(skb) == 0)
-                               goto no_csum;
-                       dev_kfree_skb_any(skb);
-                       return 1;
-               }
-
-               if (tag_bytes & 4)
-                       cmd_sts |= MAC_HDR_EXTRA_4_BYTES;
-               if (tag_bytes & 8)
-                       cmd_sts |= MAC_HDR_EXTRA_8_BYTES;
-
-               cmd_sts |= GEN_TCP_UDP_CHECKSUM |
-                          GEN_IP_V4_CHECKSUM   |
-                          ip_hdr(skb)->ihl << TX_IHL_SHIFT;
-
-               switch (ip_hdr(skb)->protocol) {
-               case IPPROTO_UDP:
-                       cmd_sts |= UDP_FRAME;
-                       l4i_chk = ntohs(sum16_as_be(udp_hdr(skb)->check));
-                       break;
-               case IPPROTO_TCP:
-                       l4i_chk = ntohs(sum16_as_be(tcp_hdr(skb)->check));
-                       break;
-               default:
-                       BUG();
-               }
-       } else {
-no_csum:
-               /* Errata BTS #50, IHL must be 5 if no HW checksum */
-               cmd_sts |= 5 << TX_IHL_SHIFT;
+       ret = skb_tx_csum(mp, skb, &l4i_chk, &cmd_sts, skb->len);
+       if (ret) {
+               dev_kfree_skb_any(skb);
+               return ret;
        }
+       cmd_sts |= TX_FIRST_DESC | GEN_CRC | BUFFER_OWNED_BY_DMA;
 
        tx_index = txq->tx_curr_desc++;
        if (txq->tx_curr_desc == txq->tx_ring_size)
@@ -801,7 +958,7 @@ no_csum:
 static netdev_tx_t mv643xx_eth_xmit(struct sk_buff *skb, struct net_device *dev)
 {
        struct mv643xx_eth_private *mp = netdev_priv(dev);
-       int length, queue;
+       int length, queue, ret;
        struct tx_queue *txq;
        struct netdev_queue *nq;
 
@@ -825,7 +982,11 @@ static netdev_tx_t mv643xx_eth_xmit(struct sk_buff *skb, struct net_device *dev)
 
        length = skb->len;
 
-       if (!txq_submit_skb(txq, skb)) {
+       if (skb_is_gso(skb))
+               ret = txq_submit_tso(txq, skb, dev);
+       else
+               ret = txq_submit_skb(txq, skb);
+       if (!ret) {
                int entries_left;
 
                txq->tx_bytes += length;
@@ -834,6 +995,8 @@ static netdev_tx_t mv643xx_eth_xmit(struct sk_buff *skb, struct net_device *dev)
                entries_left = txq->tx_ring_size - txq->tx_desc_count;
                if (entries_left < MAX_SKB_FRAGS + 1)
                        netif_tx_stop_queue(nq);
+       } else if (ret == -EBUSY) {
+               return NETDEV_TX_BUSY;
        }
 
        return NETDEV_TX_OK;
@@ -907,14 +1070,8 @@ static int txq_reclaim(struct tx_queue *txq, int budget, int force)
                        mp->dev->stats.tx_errors++;
                }
 
-               if (cmd_sts & TX_FIRST_DESC) {
-                       dma_unmap_single(mp->dev->dev.parent, desc->buf_ptr,
-                                        desc->byte_cnt, DMA_TO_DEVICE);
-               } else {
-                       dma_unmap_page(mp->dev->dev.parent, desc->buf_ptr,
-                                      desc->byte_cnt, DMA_TO_DEVICE);
-               }
-
+               dma_unmap_single(mp->dev->dev.parent, desc->buf_ptr,
+                                desc->byte_cnt, DMA_TO_DEVICE);
                dev_kfree_skb(skb);
        }
 
@@ -1871,6 +2028,15 @@ static int txq_init(struct mv643xx_eth_private *mp, int index)
                                        nexti * sizeof(struct tx_desc);
        }
 
+       /* Allocate DMA buffers for TSO MAC/IP/TCP headers */
+       txq->tso_hdrs = dma_alloc_coherent(mp->dev->dev.parent,
+                                          txq->tx_ring_size * TSO_HEADER_SIZE,
+                                          &txq->tso_hdrs_dma, GFP_KERNEL);
+       if (txq->tso_hdrs == NULL) {
+               dma_free_coherent(mp->dev->dev.parent, txq->tx_desc_area_size,
+                                 txq->tx_desc_area, txq->tx_desc_dma);
+               return -ENOMEM;
+       }
        skb_queue_head_init(&txq->tx_skb);
 
        return 0;
@@ -1891,6 +2057,10 @@ static void txq_deinit(struct tx_queue *txq)
        else
                dma_free_coherent(mp->dev->dev.parent, txq->tx_desc_area_size,
                                  txq->tx_desc_area, txq->tx_desc_dma);
+       if (txq->tso_hdrs)
+               dma_free_coherent(mp->dev->dev.parent,
+                                 txq->tx_ring_size * TSO_HEADER_SIZE,
+                                 txq->tso_hdrs, txq->tso_hdrs_dma);
 }
 
 
@@ -2889,7 +3059,7 @@ static int mv643xx_eth_probe(struct platform_device *pdev)
        if (err)
                goto out;
 
-       SET_ETHTOOL_OPS(dev, &mv643xx_eth_ethtool_ops);
+       dev->ethtool_ops = &mv643xx_eth_ethtool_ops;
 
        init_pscr(mp, pd->speed, pd->duplex);
 
@@ -2921,9 +3091,11 @@ static int mv643xx_eth_probe(struct platform_device *pdev)
        dev->watchdog_timeo = 2 * HZ;
        dev->base_addr = 0;
 
-       dev->hw_features = NETIF_F_SG | NETIF_F_IP_CSUM | NETIF_F_RXCSUM;
-       dev->features = NETIF_F_SG | NETIF_F_IP_CSUM | NETIF_F_RXCSUM;
-       dev->vlan_features = NETIF_F_SG | NETIF_F_IP_CSUM;
+       dev->features = NETIF_F_SG | NETIF_F_IP_CSUM | NETIF_F_TSO;
+       dev->vlan_features = dev->features;
+
+       dev->features |= NETIF_F_RXCSUM;
+       dev->hw_features = dev->features;
 
        dev->priv_flags |= IFF_UNICAST_FLT;
 
index b161a525fc5bd8accb44b002b64776f05d8d0319..9d5ced263a5eb3d1397e95b675a2f83e71432547 100644 (file)
@@ -232,7 +232,7 @@ static int orion_mdio_probe(struct platform_device *pdev)
                clk_prepare_enable(dev->clk);
 
        dev->err_interrupt = platform_get_irq(pdev, 0);
-       if (dev->err_interrupt != -ENXIO) {
+       if (dev->err_interrupt > 0) {
                ret = devm_request_irq(&pdev->dev, dev->err_interrupt,
                                        orion_mdio_err_irq,
                                        IRQF_SHARED, pdev->name, dev);
@@ -241,6 +241,9 @@ static int orion_mdio_probe(struct platform_device *pdev)
 
                writel(MVMDIO_ERR_INT_SMI_DONE,
                        dev->regs + MVMDIO_ERR_INT_MASK);
+
+       } else if (dev->err_interrupt == -EPROBE_DEFER) {
+               return -EPROBE_DEFER;
        }
 
        mutex_init(&dev->lock);
index 14786c8bf99efcddbbbdff7bc0f9ee9e20933864..18c698d9ef9b58ccff09c28971215e9611f8cc0b 100644 (file)
@@ -23,6 +23,7 @@
 #include <net/ip.h>
 #include <net/ipv6.h>
 #include <linux/io.h>
+#include <net/tso.h>
 #include <linux/of.h>
 #include <linux/of_irq.h>
 #include <linux/of_mdio.h>
 
 #define MVNETA_TX_MTU_MAX              0x3ffff
 
+/* TSO header size */
+#define TSO_HEADER_SIZE 128
+
 /* Max number of Rx descriptors */
 #define MVNETA_MAX_RXD 128
 
@@ -413,6 +417,12 @@ struct mvneta_tx_queue {
 
        /* Index of the next TX DMA descriptor to process */
        int next_desc_to_proc;
+
+       /* DMA buffers for TSO headers */
+       char *tso_hdrs;
+
+       /* DMA address of TSO headers */
+       dma_addr_t tso_hdrs_phys;
 };
 
 struct mvneta_rx_queue {
@@ -1519,6 +1529,126 @@ static int mvneta_rx(struct mvneta_port *pp, int rx_todo,
        return rx_done;
 }
 
+static inline void
+mvneta_tso_put_hdr(struct sk_buff *skb,
+                  struct mvneta_port *pp, struct mvneta_tx_queue *txq)
+{
+       struct mvneta_tx_desc *tx_desc;
+       int hdr_len = skb_transport_offset(skb) + tcp_hdrlen(skb);
+
+       txq->tx_skb[txq->txq_put_index] = NULL;
+       tx_desc = mvneta_txq_next_desc_get(txq);
+       tx_desc->data_size = hdr_len;
+       tx_desc->command = mvneta_skb_tx_csum(pp, skb);
+       tx_desc->command |= MVNETA_TXD_F_DESC;
+       tx_desc->buf_phys_addr = txq->tso_hdrs_phys +
+                                txq->txq_put_index * TSO_HEADER_SIZE;
+       mvneta_txq_inc_put(txq);
+}
+
+static inline int
+mvneta_tso_put_data(struct net_device *dev, struct mvneta_tx_queue *txq,
+                   struct sk_buff *skb, char *data, int size,
+                   bool last_tcp, bool is_last)
+{
+       struct mvneta_tx_desc *tx_desc;
+
+       tx_desc = mvneta_txq_next_desc_get(txq);
+       tx_desc->data_size = size;
+       tx_desc->buf_phys_addr = dma_map_single(dev->dev.parent, data,
+                                               size, DMA_TO_DEVICE);
+       if (unlikely(dma_mapping_error(dev->dev.parent,
+                    tx_desc->buf_phys_addr))) {
+               mvneta_txq_desc_put(txq);
+               return -ENOMEM;
+       }
+
+       tx_desc->command = 0;
+       txq->tx_skb[txq->txq_put_index] = NULL;
+
+       if (last_tcp) {
+               /* last descriptor in the TCP packet */
+               tx_desc->command = MVNETA_TXD_L_DESC;
+
+               /* last descriptor in SKB */
+               if (is_last)
+                       txq->tx_skb[txq->txq_put_index] = skb;
+       }
+       mvneta_txq_inc_put(txq);
+       return 0;
+}
+
+static int mvneta_tx_tso(struct sk_buff *skb, struct net_device *dev,
+                        struct mvneta_tx_queue *txq)
+{
+       int total_len, data_left;
+       int desc_count = 0;
+       struct mvneta_port *pp = netdev_priv(dev);
+       struct tso_t tso;
+       int hdr_len = skb_transport_offset(skb) + tcp_hdrlen(skb);
+       int i;
+
+       /* Count needed descriptors */
+       if ((txq->count + tso_count_descs(skb)) >= txq->size)
+               return 0;
+
+       if (skb_headlen(skb) < (skb_transport_offset(skb) + tcp_hdrlen(skb))) {
+               pr_info("*** Is this even  possible???!?!?\n");
+               return 0;
+       }
+
+       /* Initialize the TSO handler, and prepare the first payload */
+       tso_start(skb, &tso);
+
+       total_len = skb->len - hdr_len;
+       while (total_len > 0) {
+               char *hdr;
+
+               data_left = min_t(int, skb_shinfo(skb)->gso_size, total_len);
+               total_len -= data_left;
+               desc_count++;
+
+               /* prepare packet headers: MAC + IP + TCP */
+               hdr = txq->tso_hdrs + txq->txq_put_index * TSO_HEADER_SIZE;
+               tso_build_hdr(skb, hdr, &tso, data_left, total_len == 0);
+
+               mvneta_tso_put_hdr(skb, pp, txq);
+
+               while (data_left > 0) {
+                       int size;
+                       desc_count++;
+
+                       size = min_t(int, tso.size, data_left);
+
+                       if (mvneta_tso_put_data(dev, txq, skb,
+                                                tso.data, size,
+                                                size == data_left,
+                                                total_len == 0))
+                               goto err_release;
+                       data_left -= size;
+
+                       tso_build_data(skb, &tso, size);
+               }
+       }
+
+       return desc_count;
+
+err_release:
+       /* Release all used data descriptors; header descriptors must not
+        * be DMA-unmapped.
+        */
+       for (i = desc_count - 1; i >= 0; i--) {
+               struct mvneta_tx_desc *tx_desc = txq->descs + i;
+               if (!(tx_desc->command & MVNETA_TXD_F_DESC))
+                       dma_unmap_single(pp->dev->dev.parent,
+                                        tx_desc->buf_phys_addr,
+                                        tx_desc->data_size,
+                                        DMA_TO_DEVICE);
+               mvneta_txq_desc_put(txq);
+       }
+       return 0;
+}
+
 /* Handle tx fragmentation processing */
 static int mvneta_tx_frag_process(struct mvneta_port *pp, struct sk_buff *skb,
                                  struct mvneta_tx_queue *txq)
@@ -1584,15 +1714,18 @@ static int mvneta_tx(struct sk_buff *skb, struct net_device *dev)
        u16 txq_id = skb_get_queue_mapping(skb);
        struct mvneta_tx_queue *txq = &pp->txqs[txq_id];
        struct mvneta_tx_desc *tx_desc;
-       struct netdev_queue *nq;
        int frags = 0;
        u32 tx_cmd;
 
        if (!netif_running(dev))
                goto out;
 
+       if (skb_is_gso(skb)) {
+               frags = mvneta_tx_tso(skb, dev, txq);
+               goto out;
+       }
+
        frags = skb_shinfo(skb)->nr_frags + 1;
-       nq    = netdev_get_tx_queue(dev, txq_id);
 
        /* Get a descriptor for the first part of the packet */
        tx_desc = mvneta_txq_next_desc_get(txq);
@@ -1635,15 +1768,16 @@ static int mvneta_tx(struct sk_buff *skb, struct net_device *dev)
                }
        }
 
-       txq->count += frags;
-       mvneta_txq_pend_desc_add(pp, txq, frags);
-
-       if (txq->size - txq->count < MAX_SKB_FRAGS + 1)
-               netif_tx_stop_queue(nq);
-
 out:
        if (frags > 0) {
                struct mvneta_pcpu_stats *stats = this_cpu_ptr(pp->stats);
+               struct netdev_queue *nq = netdev_get_tx_queue(dev, txq_id);
+
+               txq->count += frags;
+               mvneta_txq_pend_desc_add(pp, txq, frags);
+
+               if (txq->size - txq->count < MAX_SKB_FRAGS + 1)
+                       netif_tx_stop_queue(nq);
 
                u64_stats_update_begin(&stats->syncp);
                stats->tx_packets++;
@@ -2109,6 +2243,18 @@ static int mvneta_txq_init(struct mvneta_port *pp,
                                  txq->descs, txq->descs_phys);
                return -ENOMEM;
        }
+
+       /* Allocate DMA buffers for TSO MAC/IP/TCP headers */
+       txq->tso_hdrs = dma_alloc_coherent(pp->dev->dev.parent,
+                                          txq->size * TSO_HEADER_SIZE,
+                                          &txq->tso_hdrs_phys, GFP_KERNEL);
+       if (txq->tso_hdrs == NULL) {
+               kfree(txq->tx_skb);
+               dma_free_coherent(pp->dev->dev.parent,
+                                 txq->size * MVNETA_DESC_ALIGNED_SIZE,
+                                 txq->descs, txq->descs_phys);
+               return -ENOMEM;
+       }
        mvneta_tx_done_pkts_coal_set(pp, txq, txq->done_pkts_coal);
 
        return 0;
@@ -2120,6 +2266,10 @@ static void mvneta_txq_deinit(struct mvneta_port *pp,
 {
        kfree(txq->tx_skb);
 
+       if (txq->tso_hdrs)
+               dma_free_coherent(pp->dev->dev.parent,
+                                 txq->size * TSO_HEADER_SIZE,
+                                 txq->tso_hdrs, txq->tso_hdrs_phys);
        if (txq->descs)
                dma_free_coherent(pp->dev->dev.parent,
                                  txq->size * MVNETA_DESC_ALIGNED_SIZE,
@@ -2797,9 +2947,22 @@ static int mvneta_probe(struct platform_device *pdev)
 
        phy_node = of_parse_phandle(dn, "phy", 0);
        if (!phy_node) {
-               dev_err(&pdev->dev, "no associated PHY\n");
-               err = -ENODEV;
-               goto err_free_irq;
+               if (!of_phy_is_fixed_link(dn)) {
+                       dev_err(&pdev->dev, "no PHY specified\n");
+                       err = -ENODEV;
+                       goto err_free_irq;
+               }
+
+               err = of_phy_register_fixed_link(dn);
+               if (err < 0) {
+                       dev_err(&pdev->dev, "cannot register fixed PHY\n");
+                       goto err_free_irq;
+               }
+
+               /* In the case of a fixed PHY, the DT node associated
+                * to the PHY is the Ethernet MAC DT node.
+                */
+               phy_node = dn;
        }
 
        phy_mode = of_get_phy_mode(dn);
@@ -2813,7 +2976,7 @@ static int mvneta_probe(struct platform_device *pdev)
        dev->watchdog_timeo = 5 * HZ;
        dev->netdev_ops = &mvneta_netdev_ops;
 
-       SET_ETHTOOL_OPS(dev, &mvneta_eth_tool_ops);
+       dev->ethtool_ops = &mvneta_eth_tool_ops;
 
        pp = netdev_priv(dev);
 
@@ -2882,9 +3045,9 @@ static int mvneta_probe(struct platform_device *pdev)
 
        netif_napi_add(dev, &pp->napi, mvneta_poll, pp->weight);
 
-       dev->features = NETIF_F_SG | NETIF_F_IP_CSUM;
-       dev->hw_features |= NETIF_F_SG | NETIF_F_IP_CSUM;
-       dev->vlan_features |= NETIF_F_SG | NETIF_F_IP_CSUM;
+       dev->features = NETIF_F_SG | NETIF_F_IP_CSUM | NETIF_F_TSO;
+       dev->hw_features |= dev->features;
+       dev->vlan_features |= dev->features;
        dev->priv_flags |= IFF_UNICAST_FLT;
 
        err = register_netdev(dev);
index b358c2f6f4bdc3817f98ab53c36bf8f8c7753ffd..8f5aa7c62b18f41f8eaa1b31e209687059921a47 100644 (file)
@@ -1488,7 +1488,7 @@ static int pxa168_eth_probe(struct platform_device *pdev)
        dev->netdev_ops = &pxa168_eth_netdev_ops;
        dev->watchdog_timeo = 2 * HZ;
        dev->base_addr = 0;
-       SET_ETHTOOL_OPS(dev, &pxa168_ethtool_ops);
+       dev->ethtool_ops = &pxa168_ethtool_ops;
 
        INIT_WORK(&pep->tx_timeout_task, pxa168_eth_tx_timeout_task);
 
index b81106451a0a4d2d46d831d9e629d3a24212aada..69693384b58ccfefd2bd9918112b8b08431f0368 100644 (file)
@@ -4760,7 +4760,7 @@ static struct net_device *sky2_init_netdev(struct sky2_hw *hw, unsigned port,
 
        SET_NETDEV_DEV(dev, &hw->pdev->dev);
        dev->irq = hw->pdev->irq;
-       SET_ETHTOOL_OPS(dev, &sky2_ethtool_ops);
+       dev->ethtool_ops = &sky2_ethtool_ops;
        dev->watchdog_timeo = TX_WATCHDOG;
        dev->netdev_ops = &sky2_netdev_ops[port];
 
index 78099eab767374319c7e258bfa1f0d6df4c64fa3..357dcb0f04fb70c9328a06bc2e9cc0aff92b70a1 100644 (file)
@@ -212,8 +212,7 @@ static int mlx4_comm_cmd_poll(struct mlx4_dev *dev, u8 cmd, u16 param,
 
        /* First, verify that the master reports correct status */
        if (comm_pending(dev)) {
-               mlx4_warn(dev, "Communication channel is not idle."
-                         "my toggle is %d (cmd:0x%x)\n",
+               mlx4_warn(dev, "Communication channel is not idle - my toggle is %d (cmd:0x%x)\n",
                          priv->cmd.comm_toggle, cmd);
                return -EAGAIN;
        }
@@ -422,9 +421,8 @@ static int mlx4_slave_cmd(struct mlx4_dev *dev, u64 in_param, u64 *out_param,
                                        *out_param =
                                                be64_to_cpu(vhcr->out_param);
                                else {
-                                       mlx4_err(dev, "response expected while"
-                                                "output mailbox is NULL for "
-                                                "command 0x%x\n", op);
+                                       mlx4_err(dev, "response expected while output mailbox is NULL for command 0x%x\n",
+                                                op);
                                        vhcr->status = CMD_STAT_BAD_PARAM;
                                }
                        }
@@ -439,16 +437,15 @@ static int mlx4_slave_cmd(struct mlx4_dev *dev, u64 in_param, u64 *out_param,
                                        *out_param =
                                                be64_to_cpu(vhcr->out_param);
                                else {
-                                       mlx4_err(dev, "response expected while"
-                                                "output mailbox is NULL for "
-                                                "command 0x%x\n", op);
+                                       mlx4_err(dev, "response expected while output mailbox is NULL for command 0x%x\n",
+                                                op);
                                        vhcr->status = CMD_STAT_BAD_PARAM;
                                }
                        }
                        ret = mlx4_status_to_errno(vhcr->status);
                } else
-                       mlx4_err(dev, "failed execution of VHCR_POST command"
-                                "opcode 0x%x\n", op);
+                       mlx4_err(dev, "failed execution of VHCR_POST command opcode 0x%x\n",
+                                op);
        }
 
        mutex_unlock(&priv->cmd.slave_cmd_mutex);
@@ -476,6 +473,13 @@ static int mlx4_cmd_poll(struct mlx4_dev *dev, u64 in_param, u64 *out_param,
                goto out;
        }
 
+       if (out_is_imm && !out_param) {
+               mlx4_err(dev, "response expected while output mailbox is NULL for command 0x%x\n",
+                        op);
+               err = -EINVAL;
+               goto out;
+       }
+
        err = mlx4_cmd_post(dev, in_param, out_param ? *out_param : 0,
                            in_modifier, op_modifier, op, CMD_POLL_TOKEN, 0);
        if (err)
@@ -554,6 +558,13 @@ static int mlx4_cmd_wait(struct mlx4_dev *dev, u64 in_param, u64 *out_param,
        cmd->free_head = context->next;
        spin_unlock(&cmd->context_lock);
 
+       if (out_is_imm && !out_param) {
+               mlx4_err(dev, "response expected while output mailbox is NULL for command 0x%x\n",
+                        op);
+               err = -EINVAL;
+               goto out;
+       }
+
        init_completion(&context->done);
 
        mlx4_cmd_post(dev, in_param, out_param ? *out_param : 0,
@@ -625,9 +636,8 @@ static int mlx4_ACCESS_MEM(struct mlx4_dev *dev, u64 master_addr,
 
        if ((slave_addr & 0xfff) | (master_addr & 0xfff) |
            (slave & ~0x7f) | (size & 0xff)) {
-               mlx4_err(dev, "Bad access mem params - slave_addr:0x%llx "
-                             "master_addr:0x%llx slave_id:%d size:%d\n",
-                             slave_addr, master_addr, slave, size);
+               mlx4_err(dev, "Bad access mem params - slave_addr:0x%llx master_addr:0x%llx slave_id:%d size:%d\n",
+                        slave_addr, master_addr, slave, size);
                return -EINVAL;
        }
 
@@ -788,8 +798,7 @@ static int mlx4_MAD_IFC_wrapper(struct mlx4_dev *dev, int slave,
            ((smp->mgmt_class == IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE) ||
             (smp->mgmt_class == IB_MGMT_CLASS_SUBN_LID_ROUTED &&
              smp->method == IB_MGMT_METHOD_SET))) {
-               mlx4_err(dev, "slave %d is trying to execute a Subnet MGMT MAD, "
-                        "class 0x%x, method 0x%x for attr 0x%x. Rejecting\n",
+               mlx4_err(dev, "slave %d is trying to execute a Subnet MGMT MAD, class 0x%x, method 0x%x for attr 0x%x - Rejecting\n",
                         slave, smp->method, smp->mgmt_class,
                         be16_to_cpu(smp->attr_id));
                return -EPERM;
@@ -1409,8 +1418,8 @@ static int mlx4_master_process_vhcr(struct mlx4_dev *dev, int slave,
                                      ALIGN(sizeof(struct mlx4_vhcr_cmd),
                                            MLX4_ACCESS_MEM_ALIGN), 1);
                if (ret) {
-                       mlx4_err(dev, "%s:Failed reading vhcr"
-                                "ret: 0x%x\n", __func__, ret);
+                       mlx4_err(dev, "%s: Failed reading vhcr ret: 0x%x\n",
+                                __func__, ret);
                        kfree(vhcr);
                        return ret;
                }
@@ -1461,9 +1470,8 @@ static int mlx4_master_process_vhcr(struct mlx4_dev *dev, int slave,
 
        /* Apply permission and bound checks if applicable */
        if (cmd->verify && cmd->verify(dev, slave, vhcr, inbox)) {
-               mlx4_warn(dev, "Command:0x%x from slave: %d failed protection "
-                         "checks for resource_id:%d\n", vhcr->op, slave,
-                         vhcr->in_modifier);
+               mlx4_warn(dev, "Command:0x%x from slave: %d failed protection checks for resource_id:%d\n",
+                         vhcr->op, slave, vhcr->in_modifier);
                vhcr_cmd->status = CMD_STAT_BAD_OP;
                goto out_status;
        }
@@ -1502,8 +1510,7 @@ static int mlx4_master_process_vhcr(struct mlx4_dev *dev, int slave,
        }
 
        if (err) {
-               mlx4_warn(dev, "vhcr command:0x%x slave:%d failed with"
-                         " error:%d, status %d\n",
+               mlx4_warn(dev, "vhcr command:0x%x slave:%d failed with error:%d, status %d\n",
                          vhcr->op, slave, vhcr->errno, err);
                vhcr_cmd->status = mlx4_errno_to_status(err);
                goto out_status;
@@ -1537,8 +1544,8 @@ out_status:
                                 __func__);
                else if (vhcr->e_bit &&
                         mlx4_GEN_EQE(dev, slave, &priv->mfunc.master.cmd_eqe))
-                               mlx4_warn(dev, "Failed to generate command completion "
-                                         "eqe for slave %d\n", slave);
+                               mlx4_warn(dev, "Failed to generate command completion eqe for slave %d\n",
+                                         slave);
        }
 
 out:
@@ -1577,8 +1584,9 @@ static int mlx4_master_immediate_activate_vlan_qos(struct mlx4_priv *priv,
 
        mlx4_dbg(dev, "updating immediately admin params slave %d port %d\n",
                 slave, port);
-       mlx4_dbg(dev, "vlan %d QoS %d link down %d\n", vp_admin->default_vlan,
-                vp_admin->default_qos, vp_admin->link_state);
+       mlx4_dbg(dev, "vlan %d QoS %d link down %d\n",
+                vp_admin->default_vlan, vp_admin->default_qos,
+                vp_admin->link_state);
 
        work = kzalloc(sizeof(*work), GFP_KERNEL);
        if (!work)
@@ -1591,7 +1599,7 @@ static int mlx4_master_immediate_activate_vlan_qos(struct mlx4_priv *priv,
                                                   &admin_vlan_ix);
                        if (err) {
                                kfree(work);
-                               mlx4_warn((&priv->dev),
+                               mlx4_warn(&priv->dev,
                                          "No vlan resources slave %d, port %d\n",
                                          slave, port);
                                return err;
@@ -1600,7 +1608,7 @@ static int mlx4_master_immediate_activate_vlan_qos(struct mlx4_priv *priv,
                        admin_vlan_ix = NO_INDX;
                }
                work->flags |= MLX4_VF_IMMED_VLAN_FLAG_VLAN;
-               mlx4_dbg((&(priv->dev)),
+               mlx4_dbg(&priv->dev,
                         "alloc vlan %d idx  %d slave %d port %d\n",
                         (int)(vp_admin->default_vlan),
                         admin_vlan_ix, slave, port);
@@ -1661,12 +1669,12 @@ static int mlx4_master_activate_admin_state(struct mlx4_priv *priv, int slave)
                                                   vp_admin->default_vlan, &(vp_oper->vlan_idx));
                        if (err) {
                                vp_oper->vlan_idx = NO_INDX;
-                               mlx4_warn((&priv->dev),
+                               mlx4_warn(&priv->dev,
                                          "No vlan resorces slave %d, port %d\n",
                                          slave, port);
                                return err;
                        }
-                       mlx4_dbg((&(priv->dev)), "alloc vlan %d idx  %d slave %d port %d\n",
+                       mlx4_dbg(&priv->dev, "alloc vlan %d idx  %d slave %d port %d\n",
                                 (int)(vp_oper->state.default_vlan),
                                 vp_oper->vlan_idx, slave, port);
                }
@@ -1677,12 +1685,12 @@ static int mlx4_master_activate_admin_state(struct mlx4_priv *priv, int slave)
                        if (0 > vp_oper->mac_idx) {
                                err = vp_oper->mac_idx;
                                vp_oper->mac_idx = NO_INDX;
-                               mlx4_warn((&priv->dev),
+                               mlx4_warn(&priv->dev,
                                          "No mac resorces slave %d, port %d\n",
                                          slave, port);
                                return err;
                        }
-                       mlx4_dbg((&(priv->dev)), "alloc mac %llx idx  %d slave %d port %d\n",
+                       mlx4_dbg(&priv->dev, "alloc mac %llx idx  %d slave %d port %d\n",
                                 vp_oper->state.mac, vp_oper->mac_idx, slave, port);
                }
        }
@@ -1731,8 +1739,8 @@ static void mlx4_master_do_cmd(struct mlx4_dev *dev, int slave, u8 cmd,
        slave_state[slave].comm_toggle ^= 1;
        reply = (u32) slave_state[slave].comm_toggle << 31;
        if (toggle != slave_state[slave].comm_toggle) {
-               mlx4_warn(dev, "Incorrect toggle %d from slave %d. *** MASTER"
-                         "STATE COMPROMISIED ***\n", toggle, slave);
+               mlx4_warn(dev, "Incorrect toggle %d from slave %d. *** MASTER STATE COMPROMISED ***\n",
+                         toggle, slave);
                goto reset_slave;
        }
        if (cmd == MLX4_COMM_CMD_RESET) {
@@ -1759,8 +1767,8 @@ static void mlx4_master_do_cmd(struct mlx4_dev *dev, int slave, u8 cmd,
        /*command from slave in the middle of FLR*/
        if (cmd != MLX4_COMM_CMD_RESET &&
            MLX4_COMM_CMD_FLR == slave_state[slave].last_cmd) {
-               mlx4_warn(dev, "slave:%d is Trying to run cmd(0x%x) "
-                         "in the middle of FLR\n", slave, cmd);
+               mlx4_warn(dev, "slave:%d is Trying to run cmd(0x%x) in the middle of FLR\n",
+                         slave, cmd);
                return;
        }
 
@@ -1798,8 +1806,8 @@ static void mlx4_master_do_cmd(struct mlx4_dev *dev, int slave, u8 cmd,
 
                mutex_lock(&priv->cmd.slave_cmd_mutex);
                if (mlx4_master_process_vhcr(dev, slave, NULL)) {
-                       mlx4_err(dev, "Failed processing vhcr for slave:%d,"
-                                " resetting slave.\n", slave);
+                       mlx4_err(dev, "Failed processing vhcr for slave:%d, resetting slave\n",
+                                slave);
                        mutex_unlock(&priv->cmd.slave_cmd_mutex);
                        goto reset_slave;
                }
@@ -1816,8 +1824,7 @@ static void mlx4_master_do_cmd(struct mlx4_dev *dev, int slave, u8 cmd,
                is_going_down = 1;
        spin_unlock_irqrestore(&priv->mfunc.master.slave_state_lock, flags);
        if (is_going_down) {
-               mlx4_warn(dev, "Slave is going down aborting command(%d)"
-                         " executing from slave:%d\n",
+               mlx4_warn(dev, "Slave is going down aborting command(%d) executing from slave:%d\n",
                          cmd, slave);
                return;
        }
@@ -1880,9 +1887,8 @@ void mlx4_master_comm_channel(struct work_struct *work)
                        if (toggle != slt) {
                                if (master->slave_state[slave].comm_toggle
                                    != slt) {
-                                       printk(KERN_INFO "slave %d out of sync."
-                                              " read toggle %d, state toggle %d. "
-                                              "Resynching.\n", slave, slt,
+                                       printk(KERN_INFO "slave %d out of sync. read toggle %d, state toggle %d. Resynching.\n",
+                                              slave, slt,
                                               master->slave_state[slave].comm_toggle);
                                        master->slave_state[slave].comm_toggle =
                                                slt;
@@ -1896,8 +1902,7 @@ void mlx4_master_comm_channel(struct work_struct *work)
        }
 
        if (reported && reported != served)
-               mlx4_warn(dev, "Got command event with bitmask from %d slaves"
-                         " but %d were served\n",
+               mlx4_warn(dev, "Got command event with bitmask from %d slaves but %d were served\n",
                          reported, served);
 
        if (mlx4_ARM_COMM_CHANNEL(dev))
@@ -1953,7 +1958,7 @@ int mlx4_multi_func_init(struct mlx4_dev *dev)
                ioremap(pci_resource_start(dev->pdev, 2) +
                        MLX4_SLAVE_COMM_BASE, MLX4_COMM_PAGESIZE);
        if (!priv->mfunc.comm) {
-               mlx4_err(dev, "Couldn't map communication vector.\n");
+               mlx4_err(dev, "Couldn't map communication vector\n");
                goto err_vhcr;
        }
 
@@ -2080,7 +2085,7 @@ int mlx4_cmd_init(struct mlx4_dev *dev)
                priv->cmd.hcr = ioremap(pci_resource_start(dev->pdev, 0) +
                                        MLX4_HCR_BASE, MLX4_HCR_SIZE);
                if (!priv->cmd.hcr) {
-                       mlx4_err(dev, "Couldn't map command register.\n");
+                       mlx4_err(dev, "Couldn't map command register\n");
                        return -ENOMEM;
                }
        }
index 0487121e4a0fe495d4252f01b24d16bdb2fefb06..8542030b89cf5b5d0d0c60eaa9ecc96076d72341 100644 (file)
@@ -293,6 +293,9 @@ int mlx4_cq_alloc(struct mlx4_dev *dev, int nent,
        atomic_set(&cq->refcount, 1);
        init_completion(&cq->free);
 
+       cq->irq = priv->eq_table.eq[cq->vector].irq;
+       cq->irq_affinity_change = false;
+
        return 0;
 
 err_radix:
index c2cd8d31bcad5612395783e4d29e5141ac37ab6c..636963db598ae0025f52aa806f323c61261b7d97 100644 (file)
@@ -125,8 +125,7 @@ int mlx4_en_activate_cq(struct mlx4_en_priv *priv, struct mlx4_en_cq *cq,
                                                   &cq->vector)) {
                                        cq->vector = (cq->ring + 1 + priv->port)
                                            % mdev->dev->caps.num_comp_vectors;
-                                       mlx4_warn(mdev, "Failed Assigning an EQ to "
-                                                 "%s ,Falling back to legacy EQ's\n",
+                                       mlx4_warn(mdev, "Failed assigning an EQ to %s, falling back to legacy EQ's\n",
                                                  name);
                                }
                        }
index 3e8d33605fe7b17d7cde81092c92f132163abdc2..7ba3df3cb312dfdfe2cfb08074c662743c49f67a 100644 (file)
@@ -925,13 +925,13 @@ static int mlx4_en_flow_replace(struct net_device *dev,
                qpn = cmd->fs.ring_cookie & (EN_ETHTOOL_QP_ATTACH - 1);
        } else {
                if (cmd->fs.ring_cookie >= priv->rx_ring_num) {
-                       en_warn(priv, "rxnfc: RX ring (%llu) doesn't exist.\n",
+                       en_warn(priv, "rxnfc: RX ring (%llu) doesn't exist\n",
                                cmd->fs.ring_cookie);
                        return -EINVAL;
                }
                qpn = priv->rss_map.qps[cmd->fs.ring_cookie].qpn;
                if (!qpn) {
-                       en_warn(priv, "rxnfc: RX ring (%llu) is inactive.\n",
+                       en_warn(priv, "rxnfc: RX ring (%llu) is inactive\n",
                                cmd->fs.ring_cookie);
                        return -EINVAL;
                }
@@ -956,7 +956,7 @@ static int mlx4_en_flow_replace(struct net_device *dev,
        }
        err = mlx4_flow_attach(priv->mdev->dev, &rule, &reg_id);
        if (err) {
-               en_err(priv, "Fail to attach network rule at location %d.\n",
+               en_err(priv, "Fail to attach network rule at location %d\n",
                       cmd->fs.location);
                goto out_free_list;
        }
@@ -1121,7 +1121,7 @@ static int mlx4_en_set_channels(struct net_device *dev,
 {
        struct mlx4_en_priv *priv = netdev_priv(dev);
        struct mlx4_en_dev *mdev = priv->mdev;
-       int port_up;
+       int port_up = 0;
        int err = 0;
 
        if (channel->other_count || channel->combined_count ||
@@ -1151,7 +1151,8 @@ static int mlx4_en_set_channels(struct net_device *dev,
        netif_set_real_num_tx_queues(dev, priv->tx_ring_num);
        netif_set_real_num_rx_queues(dev, priv->rx_ring_num);
 
-       mlx4_en_setup_tc(dev, MLX4_EN_NUM_UP);
+       if (dev->num_tc)
+               mlx4_en_setup_tc(dev, MLX4_EN_NUM_UP);
 
        en_warn(priv, "Using %d TX rings\n", priv->tx_ring_num);
        en_warn(priv, "Using %d RX rings\n", priv->rx_ring_num);
index 0c59d4fe7e3aae56afee09b7e279601192c9e26e..f953c1d7eae6a700a4fb7aacfacbaaa822c141d1 100644 (file)
@@ -133,7 +133,7 @@ static int mlx4_en_get_profile(struct mlx4_en_dev *mdev)
                        MLX4_EN_MAX_TX_RING_P_UP);
        if (params->udp_rss && !(mdev->dev->caps.flags
                                        & MLX4_DEV_CAP_FLAG_UDP_RSS)) {
-               mlx4_warn(mdev, "UDP RSS is not supported on this device.\n");
+               mlx4_warn(mdev, "UDP RSS is not supported on this device\n");
                params->udp_rss = 0;
        }
        for (i = 1; i <= MLX4_MAX_PORTS; i++) {
@@ -251,8 +251,7 @@ static void *mlx4_en_add(struct mlx4_dev *dev)
 
        mdev->LSO_support = !!(dev->caps.flags & (1 << 15));
        if (!mdev->LSO_support)
-               mlx4_warn(mdev, "LSO not supported, please upgrade to later "
-                               "FW version to enable LSO\n");
+               mlx4_warn(mdev, "LSO not supported, please upgrade to later FW version to enable LSO\n");
 
        if (mlx4_mr_alloc(mdev->dev, mdev->priv_pdn, 0, ~0ull,
                         MLX4_PERM_LOCAL_WRITE |  MLX4_PERM_LOCAL_READ,
@@ -268,7 +267,7 @@ static void *mlx4_en_add(struct mlx4_dev *dev)
        /* Build device profile according to supplied module parameters */
        err = mlx4_en_get_profile(mdev);
        if (err) {
-               mlx4_err(mdev, "Bad module parameters, aborting.\n");
+               mlx4_err(mdev, "Bad module parameters, aborting\n");
                goto err_mr;
        }
 
index 7e4b1720c3d1bec183957beeba7d395ce38c6e34..58209bd0c94c6ced62a5984dd72911669eaa7ef9 100644 (file)
@@ -130,7 +130,7 @@ static enum mlx4_net_trans_rule_id mlx4_ip_proto_to_trans_rule_id(u8 ip_proto)
        case IPPROTO_TCP:
                return MLX4_NET_TRANS_RULE_ID_TCP;
        default:
-               return -EPROTONOSUPPORT;
+               return MLX4_NET_TRANS_RULE_NUM;
        }
 };
 
@@ -177,7 +177,7 @@ static void mlx4_en_filter_work(struct work_struct *work)
        int rc;
        __be64 mac_mask = cpu_to_be64(MLX4_MAC_MASK << 16);
 
-       if (spec_tcp_udp.id < 0) {
+       if (spec_tcp_udp.id >= MLX4_NET_TRANS_RULE_NUM) {
                en_warn(priv, "RFS: ignoring unsupported ip protocol (%d)\n",
                        filter->ip_proto);
                goto ignore;
@@ -770,11 +770,12 @@ static int mlx4_en_do_set_mac(struct mlx4_en_priv *priv)
                                          priv->dev->dev_addr, priv->prev_mac);
                if (err)
                        en_err(priv, "Failed changing HW MAC address\n");
-               memcpy(priv->prev_mac, priv->dev->dev_addr,
-                      sizeof(priv->prev_mac));
        } else
                en_dbg(HW, priv, "Port is down while registering mac, exiting...\n");
 
+       memcpy(priv->prev_mac, priv->dev->dev_addr,
+              sizeof(priv->prev_mac));
+
        return err;
 }
 
@@ -788,9 +789,8 @@ static int mlx4_en_set_mac(struct net_device *dev, void *addr)
        if (!is_valid_ether_addr(saddr->sa_data))
                return -EADDRNOTAVAIL;
 
-       memcpy(dev->dev_addr, saddr->sa_data, ETH_ALEN);
-
        mutex_lock(&mdev->state_lock);
+       memcpy(dev->dev_addr, saddr->sa_data, ETH_ALEN);
        err = mlx4_en_do_set_mac(priv);
        mutex_unlock(&mdev->state_lock);
 
@@ -1576,7 +1576,7 @@ int mlx4_en_start_port(struct net_device *dev)
                        cq->buf[j].owner_sr_opcode = MLX4_CQE_OWNER_MASK;
                err = mlx4_en_set_cq_moder(priv, cq);
                if (err) {
-                       en_err(priv, "Failed setting cq moderation parameters");
+                       en_err(priv, "Failed setting cq moderation parameters\n");
                        mlx4_en_deactivate_cq(priv, cq);
                        goto cq_err;
                }
@@ -1615,7 +1615,7 @@ int mlx4_en_start_port(struct net_device *dev)
                }
                err = mlx4_en_set_cq_moder(priv, cq);
                if (err) {
-                       en_err(priv, "Failed setting cq moderation parameters");
+                       en_err(priv, "Failed setting cq moderation parameters\n");
                        mlx4_en_deactivate_cq(priv, cq);
                        goto tx_err;
                }
@@ -2539,7 +2539,7 @@ int mlx4_en_init_netdev(struct mlx4_en_dev *mdev, int port,
        netif_set_real_num_tx_queues(dev, priv->tx_ring_num);
        netif_set_real_num_rx_queues(dev, priv->rx_ring_num);
 
-       SET_ETHTOOL_OPS(dev, &mlx4_en_ethtool_ops);
+       dev->ethtool_ops = &mlx4_en_ethtool_ops;
 
        /*
         * Set driver features
@@ -2594,8 +2594,8 @@ int mlx4_en_init_netdev(struct mlx4_en_dev *mdev, int port,
                                    prof->tx_pause, prof->tx_ppp,
                                    prof->rx_pause, prof->rx_ppp);
        if (err) {
-               en_err(priv, "Failed setting port general configurations "
-                      "for port %d, with error %d\n", priv->port, err);
+               en_err(priv, "Failed setting port general configurations for port %d, with error %d\n",
+                      priv->port, err);
                goto out;
        }
 
index ba049ae88749dac986a0712d281bbd649152acdd..e8c0d2b832b79f4f46b82cf52bca646e9b13008b 100644 (file)
@@ -270,13 +270,11 @@ static int mlx4_en_fill_rx_buffers(struct mlx4_en_priv *priv)
                                                    ring->actual_size,
                                                    GFP_KERNEL)) {
                                if (ring->actual_size < MLX4_EN_MIN_RX_SIZE) {
-                                       en_err(priv, "Failed to allocate "
-                                                    "enough rx buffers\n");
+                                       en_err(priv, "Failed to allocate enough rx buffers\n");
                                        return -ENOMEM;
                                } else {
                                        new_size = rounddown_pow_of_two(ring->actual_size);
-                                       en_warn(priv, "Only %d buffers allocated "
-                                                     "reducing ring size to %d",
+                                       en_warn(priv, "Only %d buffers allocated reducing ring size to %d\n",
                                                ring->actual_size, new_size);
                                        goto reduce_rings;
                                }
@@ -685,10 +683,9 @@ int mlx4_en_process_rx_cq(struct net_device *dev, struct mlx4_en_cq *cq, int bud
                /* Drop packet on bad receive or bad checksum */
                if (unlikely((cqe->owner_sr_opcode & MLX4_CQE_OPCODE_MASK) ==
                                                MLX4_CQE_OPCODE_ERROR)) {
-                       en_err(priv, "CQE completed in error - vendor "
-                                 "syndrom:%d syndrom:%d\n",
-                                 ((struct mlx4_err_cqe *) cqe)->vendor_err_syndrome,
-                                 ((struct mlx4_err_cqe *) cqe)->syndrome);
+                       en_err(priv, "CQE completed in error - vendor syndrom:%d syndrom:%d\n",
+                              ((struct mlx4_err_cqe *)cqe)->vendor_err_syndrome,
+                              ((struct mlx4_err_cqe *)cqe)->syndrome);
                        goto next;
                }
                if (unlikely(cqe->badfcs_enc & MLX4_CQE_BAD_FCS)) {
@@ -898,10 +895,17 @@ int mlx4_en_poll_rx_cq(struct napi_struct *napi, int budget)
        mlx4_en_cq_unlock_napi(cq);
 
        /* If we used up all the quota - we're probably not done yet... */
-       if (done == budget)
+       if (done == budget) {
                INC_PERF_COUNTER(priv->pstats.napi_quota);
-       else {
+               if (unlikely(cq->mcq.irq_affinity_change)) {
+                       cq->mcq.irq_affinity_change = false;
+                       napi_complete(napi);
+                       mlx4_en_arm_cq(priv, cq);
+                       return 0;
+               }
+       } else {
                /* Done for now */
+               cq->mcq.irq_affinity_change = false;
                napi_complete(napi);
                mlx4_en_arm_cq(priv, cq);
        }
@@ -944,8 +948,8 @@ void mlx4_en_calc_rx_buf(struct net_device *dev)
        priv->rx_skb_size = eff_mtu;
        priv->log_rx_info = ROUNDUP_LOG2(i * sizeof(struct mlx4_en_rx_alloc));
 
-       en_dbg(DRV, priv, "Rx buffer scatter-list (effective-mtu:%d "
-                 "num_frags:%d):\n", eff_mtu, priv->num_frags);
+       en_dbg(DRV, priv, "Rx buffer scatter-list (effective-mtu:%d num_frags:%d):\n",
+              eff_mtu, priv->num_frags);
        for (i = 0; i < priv->num_frags; i++) {
                en_err(priv,
                       "  frag:%d - size:%d prefix:%d align:%d stride:%d\n",
index dd1f6d346459808dfe95690ce5fcf0af31e99231..cb964056d71023a40ff5f68b7ceb3a367607494a 100644 (file)
@@ -108,9 +108,9 @@ int mlx4_en_create_tx_ring(struct mlx4_en_priv *priv,
 
        ring->buf = ring->wqres.buf.direct.buf;
 
-       en_dbg(DRV, priv, "Allocated TX ring (addr:%p) - buf:%p size:%d "
-              "buf_size:%d dma:%llx\n", ring, ring->buf, ring->size,
-              ring->buf_size, (unsigned long long) ring->wqres.buf.direct.map);
+       en_dbg(DRV, priv, "Allocated TX ring (addr:%p) - buf:%p size:%d buf_size:%d dma:%llx\n",
+              ring, ring->buf, ring->size, ring->buf_size,
+              (unsigned long long) ring->wqres.buf.direct.map);
 
        ring->qpn = qpn;
        err = mlx4_qp_alloc(mdev->dev, ring->qpn, &ring->qp);
@@ -122,7 +122,7 @@ int mlx4_en_create_tx_ring(struct mlx4_en_priv *priv,
 
        err = mlx4_bf_alloc(mdev->dev, &ring->bf, node);
        if (err) {
-               en_dbg(DRV, priv, "working without blueflame (%d)", err);
+               en_dbg(DRV, priv, "working without blueflame (%d)\n", err);
                ring->bf.uar = &mdev->priv_uar;
                ring->bf.uar->map = mdev->uar_map;
                ring->bf_enabled = false;
@@ -474,9 +474,15 @@ int mlx4_en_poll_tx_cq(struct napi_struct *napi, int budget)
        /* If we used up all the quota - we're probably not done yet... */
        if (done < budget) {
                /* Done for now */
+               cq->mcq.irq_affinity_change = false;
                napi_complete(napi);
                mlx4_en_arm_cq(priv, cq);
                return done;
+       } else if (unlikely(cq->mcq.irq_affinity_change)) {
+               cq->mcq.irq_affinity_change = false;
+               napi_complete(napi);
+               mlx4_en_arm_cq(priv, cq);
+               return 0;
        }
        return budget;
 }
index d501a2b0fb79f18e560fd0cd067aa4c19b83b447..d954ec1eac173752e23e57653ccd4d2cae2de944 100644 (file)
@@ -53,6 +53,11 @@ enum {
        MLX4_EQ_ENTRY_SIZE      = 0x20
 };
 
+struct mlx4_irq_notify {
+       void *arg;
+       struct irq_affinity_notify notify;
+};
+
 #define MLX4_EQ_STATUS_OK         ( 0 << 28)
 #define MLX4_EQ_STATUS_WRITE_FAIL  (10 << 28)
 #define MLX4_EQ_OWNER_SW          ( 0 << 24)
@@ -152,14 +157,13 @@ void mlx4_gen_slave_eqe(struct work_struct *work)
                                if (i != dev->caps.function &&
                                    master->slave_state[i].active)
                                        if (mlx4_GEN_EQE(dev, i, eqe))
-                                               mlx4_warn(dev, "Failed to "
-                                                         " generate event "
-                                                         "for slave %d\n", i);
+                                               mlx4_warn(dev, "Failed to generate event for slave %d\n",
+                                                         i);
                        }
                } else {
                        if (mlx4_GEN_EQE(dev, slave, eqe))
-                               mlx4_warn(dev, "Failed to generate event "
-                                              "for slave %d\n", slave);
+                               mlx4_warn(dev, "Failed to generate event for slave %d\n",
+                                         slave);
                }
                ++slave_eq->cons;
        }
@@ -177,8 +181,8 @@ static void slave_event(struct mlx4_dev *dev, u8 slave, struct mlx4_eqe *eqe)
        s_eqe = &slave_eq->event_eqe[slave_eq->prod & (SLAVE_EVENT_EQ_SIZE - 1)];
        if ((!!(s_eqe->owner & 0x80)) ^
            (!!(slave_eq->prod & SLAVE_EVENT_EQ_SIZE))) {
-               mlx4_warn(dev, "Master failed to generate an EQE for slave: %d. "
-                         "No free EQE on slave events queue\n", slave);
+               mlx4_warn(dev, "Master failed to generate an EQE for slave: %d. No free EQE on slave events queue\n",
+                         slave);
                spin_unlock_irqrestore(&slave_eq->event_lock, flags);
                return;
        }
@@ -375,9 +379,9 @@ int set_and_calc_slave_port_state(struct mlx4_dev *dev, int slave,
                }
                break;
        default:
-               pr_err("%s: BUG!!! UNKNOWN state: "
-                      "slave:%d, port:%d\n", __func__, slave, port);
-                       goto out;
+               pr_err("%s: BUG!!! UNKNOWN state: slave:%d, port:%d\n",
+                      __func__, slave, port);
+               goto out;
        }
        ret = mlx4_get_slave_port_state(dev, slave, port);
 
@@ -425,8 +429,8 @@ void mlx4_master_handle_slave_flr(struct work_struct *work)
        for (i = 0 ; i < dev->num_slaves; i++) {
 
                if (MLX4_COMM_CMD_FLR == slave_state[i].last_cmd) {
-                       mlx4_dbg(dev, "mlx4_handle_slave_flr: "
-                                "clean slave: %d\n", i);
+                       mlx4_dbg(dev, "mlx4_handle_slave_flr: clean slave: %d\n",
+                                i);
 
                        mlx4_delete_all_resources_for_slave(dev, i);
                        /*return the slave to running mode*/
@@ -438,8 +442,8 @@ void mlx4_master_handle_slave_flr(struct work_struct *work)
                        err = mlx4_cmd(dev, 0, i, 0, MLX4_CMD_INFORM_FLR_DONE,
                                       MLX4_CMD_TIME_CLASS_A, MLX4_CMD_WRAPPED);
                        if (err)
-                               mlx4_warn(dev, "Failed to notify FW on "
-                                         "FLR done (slave:%d)\n", i);
+                               mlx4_warn(dev, "Failed to notify FW on FLR done (slave:%d)\n",
+                                         i);
                }
        }
 }
@@ -490,9 +494,7 @@ static int mlx4_eq_int(struct mlx4_dev *dev, struct mlx4_eq *eq)
                                                be32_to_cpu(eqe->event.qp.qpn)
                                                & 0xffffff, &slave);
                                if (ret && ret != -ENOENT) {
-                                       mlx4_dbg(dev, "QP event %02x(%02x) on "
-                                                "EQ %d at index %u: could "
-                                                "not get slave id (%d)\n",
+                                       mlx4_dbg(dev, "QP event %02x(%02x) on EQ %d at index %u: could not get slave id (%d)\n",
                                                 eqe->type, eqe->subtype,
                                                 eq->eqn, eq->cons_index, ret);
                                        break;
@@ -520,23 +522,19 @@ static int mlx4_eq_int(struct mlx4_dev *dev, struct mlx4_eq *eq)
                                                & 0xffffff,
                                                &slave);
                                if (ret && ret != -ENOENT) {
-                                       mlx4_warn(dev, "SRQ event %02x(%02x) "
-                                                 "on EQ %d at index %u: could"
-                                                 " not get slave id (%d)\n",
+                                       mlx4_warn(dev, "SRQ event %02x(%02x) on EQ %d at index %u: could not get slave id (%d)\n",
                                                  eqe->type, eqe->subtype,
                                                  eq->eqn, eq->cons_index, ret);
                                        break;
                                }
-                               mlx4_warn(dev, "%s: slave:%d, srq_no:0x%x,"
-                                         " event: %02x(%02x)\n", __func__,
-                                         slave,
+                               mlx4_warn(dev, "%s: slave:%d, srq_no:0x%x, event: %02x(%02x)\n",
+                                         __func__, slave,
                                          be32_to_cpu(eqe->event.srq.srqn),
                                          eqe->type, eqe->subtype);
 
                                if (!ret && slave != dev->caps.function) {
-                                       mlx4_warn(dev, "%s: sending event "
-                                                 "%02x(%02x) to slave:%d\n",
-                                                  __func__, eqe->type,
+                                       mlx4_warn(dev, "%s: sending event %02x(%02x) to slave:%d\n",
+                                                 __func__, eqe->type,
                                                  eqe->subtype, slave);
                                        mlx4_slave_event(dev, slave, eqe);
                                        break;
@@ -569,8 +567,7 @@ static int mlx4_eq_int(struct mlx4_dev *dev, struct mlx4_eq *eq)
                                        if (dev->caps.port_type[port] == MLX4_PORT_TYPE_ETH) {
                                                if (i == mlx4_master_func_num(dev))
                                                        continue;
-                                               mlx4_dbg(dev, "%s: Sending MLX4_PORT_CHANGE_SUBTYPE_DOWN"
-                                                        " to slave: %d, port:%d\n",
+                                               mlx4_dbg(dev, "%s: Sending MLX4_PORT_CHANGE_SUBTYPE_DOWN to slave: %d, port:%d\n",
                                                         __func__, i, port);
                                                s_info = &priv->mfunc.master.vf_oper[slave].vport[port].state;
                                                if (IFLA_VF_LINK_STATE_AUTO == s_info->link_state) {
@@ -634,11 +631,9 @@ static int mlx4_eq_int(struct mlx4_dev *dev, struct mlx4_eq *eq)
                                        be32_to_cpu(eqe->event.cq_err.cqn)
                                        & 0xffffff, &slave);
                                if (ret && ret != -ENOENT) {
-                                       mlx4_dbg(dev, "CQ event %02x(%02x) on "
-                                                "EQ %d at index %u: could "
-                                                 "not get slave id (%d)\n",
-                                                 eqe->type, eqe->subtype,
-                                                 eq->eqn, eq->cons_index, ret);
+                                       mlx4_dbg(dev, "CQ event %02x(%02x) on EQ %d at index %u: could not get slave id (%d)\n",
+                                                eqe->type, eqe->subtype,
+                                                eq->eqn, eq->cons_index, ret);
                                        break;
                                }
 
@@ -667,8 +662,7 @@ static int mlx4_eq_int(struct mlx4_dev *dev, struct mlx4_eq *eq)
 
                case MLX4_EVENT_TYPE_COMM_CHANNEL:
                        if (!mlx4_is_master(dev)) {
-                               mlx4_warn(dev, "Received comm channel event "
-                                              "for non master device\n");
+                               mlx4_warn(dev, "Received comm channel event for non master device\n");
                                break;
                        }
                        memcpy(&priv->mfunc.master.comm_arm_bit_vector,
@@ -681,8 +675,7 @@ static int mlx4_eq_int(struct mlx4_dev *dev, struct mlx4_eq *eq)
                case MLX4_EVENT_TYPE_FLR_EVENT:
                        flr_slave = be32_to_cpu(eqe->event.flr_event.slave_id);
                        if (!mlx4_is_master(dev)) {
-                               mlx4_warn(dev, "Non-master function received"
-                                              "FLR event\n");
+                               mlx4_warn(dev, "Non-master function received FLR event\n");
                                break;
                        }
 
@@ -711,22 +704,17 @@ static int mlx4_eq_int(struct mlx4_dev *dev, struct mlx4_eq *eq)
                        if (eqe->subtype == MLX4_FATAL_WARNING_SUBTYPE_WARMING) {
                                if (mlx4_is_master(dev))
                                        for (i = 0; i < dev->num_slaves; i++) {
-                                               mlx4_dbg(dev, "%s: Sending "
-                                                       "MLX4_FATAL_WARNING_SUBTYPE_WARMING"
-                                                       " to slave: %d\n", __func__, i);
+                                               mlx4_dbg(dev, "%s: Sending MLX4_FATAL_WARNING_SUBTYPE_WARMING to slave: %d\n",
+                                                        __func__, i);
                                                if (i == dev->caps.function)
                                                        continue;
                                                mlx4_slave_event(dev, i, eqe);
                                        }
-                               mlx4_err(dev, "Temperature Threshold was reached! "
-                                       "Threshold: %d celsius degrees; "
-                                       "Current Temperature: %d\n",
-                                       be16_to_cpu(eqe->event.warming.warning_threshold),
-                                       be16_to_cpu(eqe->event.warming.current_temperature));
+                               mlx4_err(dev, "Temperature Threshold was reached! Threshold: %d celsius degrees; Current Temperature: %d\n",
+                                        be16_to_cpu(eqe->event.warming.warning_threshold),
+                                        be16_to_cpu(eqe->event.warming.current_temperature));
                        } else
-                               mlx4_warn(dev, "Unhandled event FATAL WARNING (%02x), "
-                                         "subtype %02x on EQ %d at index %u. owner=%x, "
-                                         "nent=0x%x, slave=%x, ownership=%s\n",
+                               mlx4_warn(dev, "Unhandled event FATAL WARNING (%02x), subtype %02x on EQ %d at index %u. owner=%x, nent=0x%x, slave=%x, ownership=%s\n",
                                          eqe->type, eqe->subtype, eq->eqn,
                                          eq->cons_index, eqe->owner, eq->nent,
                                          eqe->slave_id,
@@ -743,9 +731,7 @@ static int mlx4_eq_int(struct mlx4_dev *dev, struct mlx4_eq *eq)
                case MLX4_EVENT_TYPE_EEC_CATAS_ERROR:
                case MLX4_EVENT_TYPE_ECC_DETECT:
                default:
-                       mlx4_warn(dev, "Unhandled event %02x(%02x) on EQ %d at "
-                                 "index %u. owner=%x, nent=0x%x, slave=%x, "
-                                 "ownership=%s\n",
+                       mlx4_warn(dev, "Unhandled event %02x(%02x) on EQ %d at index %u. owner=%x, nent=0x%x, slave=%x, ownership=%s\n",
                                  eqe->type, eqe->subtype, eq->eqn,
                                  eq->cons_index, eqe->owner, eq->nent,
                                  eqe->slave_id,
@@ -1088,7 +1074,7 @@ static int mlx4_map_clr_int(struct mlx4_dev *dev)
        priv->clr_base = ioremap(pci_resource_start(dev->pdev, priv->fw.clr_int_bar) +
                                 priv->fw.clr_int_base, MLX4_CLR_INT_SIZE);
        if (!priv->clr_base) {
-               mlx4_err(dev, "Couldn't map interrupt clear register, aborting.\n");
+               mlx4_err(dev, "Couldn't map interrupt clear register, aborting\n");
                return -ENOMEM;
        }
 
@@ -1102,6 +1088,57 @@ static void mlx4_unmap_clr_int(struct mlx4_dev *dev)
        iounmap(priv->clr_base);
 }
 
+static void mlx4_irq_notifier_notify(struct irq_affinity_notify *notify,
+                                    const cpumask_t *mask)
+{
+       struct mlx4_irq_notify *n = container_of(notify,
+                                                struct mlx4_irq_notify,
+                                                notify);
+       struct mlx4_priv *priv = (struct mlx4_priv *)n->arg;
+       struct radix_tree_iter iter;
+       void **slot;
+
+       radix_tree_for_each_slot(slot, &priv->cq_table.tree, &iter, 0) {
+               struct mlx4_cq *cq = (struct mlx4_cq *)(*slot);
+
+               if (cq->irq == notify->irq)
+                       cq->irq_affinity_change = true;
+       }
+}
+
+static void mlx4_release_irq_notifier(struct kref *ref)
+{
+       struct mlx4_irq_notify *n = container_of(ref, struct mlx4_irq_notify,
+                                                notify.kref);
+       kfree(n);
+}
+
+static void mlx4_assign_irq_notifier(struct mlx4_priv *priv,
+                                    struct mlx4_dev *dev, int irq)
+{
+       struct mlx4_irq_notify *irq_notifier = NULL;
+       int err = 0;
+
+       irq_notifier = kzalloc(sizeof(*irq_notifier), GFP_KERNEL);
+       if (!irq_notifier) {
+               mlx4_warn(dev, "Failed to allocate irq notifier. irq %d\n",
+                         irq);
+               return;
+       }
+
+       irq_notifier->notify.irq = irq;
+       irq_notifier->notify.notify = mlx4_irq_notifier_notify;
+       irq_notifier->notify.release = mlx4_release_irq_notifier;
+       irq_notifier->arg = priv;
+       err = irq_set_affinity_notifier(irq, &irq_notifier->notify);
+       if (err) {
+               kfree(irq_notifier);
+               irq_notifier = NULL;
+               mlx4_warn(dev, "Failed to set irq notifier. irq %d\n", irq);
+       }
+}
+
+
 int mlx4_alloc_eq_table(struct mlx4_dev *dev)
 {
        struct mlx4_priv *priv = mlx4_priv(dev);
@@ -1372,6 +1409,9 @@ int mlx4_assign_eq(struct mlx4_dev *dev, char *name, struct cpu_rmap *rmap,
                                continue;
                                /*we dont want to break here*/
                        }
+                       mlx4_assign_irq_notifier(priv, dev,
+                                                priv->eq_table.eq[vec].irq);
+
                        eq_set_ci(&priv->eq_table.eq[vec], 1);
                }
        }
@@ -1398,6 +1438,9 @@ void mlx4_release_eq(struct mlx4_dev *dev, int vec)
                  Belonging to a legacy EQ*/
                mutex_lock(&priv->msix_ctl.pool_lock);
                if (priv->msix_ctl.pool_bm & 1ULL << i) {
+                       irq_set_affinity_notifier(
+                               priv->eq_table.eq[vec].irq,
+                               NULL);
                        free_irq(priv->eq_table.eq[vec].irq,
                                 &priv->eq_table.eq[vec]);
                        priv->msix_ctl.pool_bm &= ~(1ULL << i);
index d16a4d11890342167a2f2c8605e3b5e4e9d25198..c52e048913177371f6db13f4e3f5e98100585b52 100644 (file)
@@ -428,8 +428,7 @@ int mlx4_QUERY_FUNC_CAP(struct mlx4_dev *dev, u32 gen_or_port,
        } else if (dev->caps.port_type[gen_or_port] == MLX4_PORT_TYPE_IB) {
                MLX4_GET(field, outbox, QUERY_FUNC_CAP_FLAGS0_OFFSET);
                if (field & QUERY_FUNC_CAP_FLAGS0_FORCE_PHY_WQE_GID) {
-                       mlx4_err(dev, "phy_wqe_gid is "
-                                "enforced on this ib port\n");
+                       mlx4_err(dev, "phy_wqe_gid is enforced on this ib port\n");
                        err = -EPROTONOSUPPORT;
                        goto out;
                }
@@ -1054,10 +1053,10 @@ int mlx4_map_cmd(struct mlx4_dev *dev, u16 op, struct mlx4_icm *icm, u64 virt)
                 */
                lg = ffs(mlx4_icm_addr(&iter) | mlx4_icm_size(&iter)) - 1;
                if (lg < MLX4_ICM_PAGE_SHIFT) {
-                       mlx4_warn(dev, "Got FW area not aligned to %d (%llx/%lx).\n",
-                                  MLX4_ICM_PAGE_SIZE,
-                                  (unsigned long long) mlx4_icm_addr(&iter),
-                                  mlx4_icm_size(&iter));
+                       mlx4_warn(dev, "Got FW area not aligned to %d (%llx/%lx)\n",
+                                 MLX4_ICM_PAGE_SIZE,
+                                 (unsigned long long) mlx4_icm_addr(&iter),
+                                 mlx4_icm_size(&iter));
                        err = -EINVAL;
                        goto out;
                }
@@ -1093,14 +1092,14 @@ int mlx4_map_cmd(struct mlx4_dev *dev, u16 op, struct mlx4_icm *icm, u64 virt)
 
        switch (op) {
        case MLX4_CMD_MAP_FA:
-               mlx4_dbg(dev, "Mapped %d chunks/%d KB for FW.\n", tc, ts);
+               mlx4_dbg(dev, "Mapped %d chunks/%d KB for FW\n", tc, ts);
                break;
        case MLX4_CMD_MAP_ICM_AUX:
-               mlx4_dbg(dev, "Mapped %d chunks/%d KB for ICM aux.\n", tc, ts);
+               mlx4_dbg(dev, "Mapped %d chunks/%d KB for ICM aux\n", tc, ts);
                break;
        case MLX4_CMD_MAP_ICM:
-               mlx4_dbg(dev, "Mapped %d chunks/%d KB at %llx for ICM.\n",
-                         tc, ts, (unsigned long long) virt - (ts << 10));
+               mlx4_dbg(dev, "Mapped %d chunks/%d KB at %llx for ICM\n",
+                        tc, ts, (unsigned long long) virt - (ts << 10));
                break;
        }
 
@@ -1186,14 +1185,13 @@ int mlx4_QUERY_FW(struct mlx4_dev *dev)
        MLX4_GET(cmd_if_rev, outbox, QUERY_FW_CMD_IF_REV_OFFSET);
        if (cmd_if_rev < MLX4_COMMAND_INTERFACE_MIN_REV ||
            cmd_if_rev > MLX4_COMMAND_INTERFACE_MAX_REV) {
-               mlx4_err(dev, "Installed FW has unsupported "
-                        "command interface revision %d.\n",
+               mlx4_err(dev, "Installed FW has unsupported command interface revision %d\n",
                         cmd_if_rev);
                mlx4_err(dev, "(Installed FW version is %d.%d.%03d)\n",
                         (int) (dev->caps.fw_ver >> 32),
                         (int) (dev->caps.fw_ver >> 16) & 0xffff,
                         (int) dev->caps.fw_ver & 0xffff);
-               mlx4_err(dev, "This driver version supports only revisions %d to %d.\n",
+               mlx4_err(dev, "This driver version supports only revisions %d to %d\n",
                         MLX4_COMMAND_INTERFACE_MIN_REV, MLX4_COMMAND_INTERFACE_MAX_REV);
                err = -ENODEV;
                goto out;
index cef267e24f9c9c680613ec4ed2817c040b21c964..a56f6012258d50059e97cf9343a51adc367154e6 100644 (file)
@@ -104,8 +104,6 @@ module_param(enable_64b_cqe_eqe, bool, 0444);
 MODULE_PARM_DESC(enable_64b_cqe_eqe,
                 "Enable 64 byte CQEs/EQEs when the FW supports this (default: True)");
 
-#define HCA_GLOBAL_CAP_MASK            0
-
 #define PF_CONTEXT_BEHAVIOUR_MASK      MLX4_FUNC_CAP_64B_EQE_CQE
 
 static char mlx4_version[] =
@@ -163,8 +161,7 @@ int mlx4_check_port_params(struct mlx4_dev *dev,
        for (i = 0; i < dev->caps.num_ports - 1; i++) {
                if (port_type[i] != port_type[i + 1]) {
                        if (!(dev->caps.flags & MLX4_DEV_CAP_FLAG_DPDP)) {
-                               mlx4_err(dev, "Only same port types supported "
-                                        "on this HCA, aborting.\n");
+                               mlx4_err(dev, "Only same port types supported on this HCA, aborting\n");
                                return -EINVAL;
                        }
                }
@@ -172,8 +169,8 @@ int mlx4_check_port_params(struct mlx4_dev *dev,
 
        for (i = 0; i < dev->caps.num_ports; i++) {
                if (!(port_type[i] & dev->caps.supported_type[i+1])) {
-                       mlx4_err(dev, "Requested port type for port %d is not "
-                                     "supported on this HCA\n", i + 1);
+                       mlx4_err(dev, "Requested port type for port %d is not supported on this HCA\n",
+                                i + 1);
                        return -EINVAL;
                }
        }
@@ -195,26 +192,23 @@ static int mlx4_dev_cap(struct mlx4_dev *dev, struct mlx4_dev_cap *dev_cap)
 
        err = mlx4_QUERY_DEV_CAP(dev, dev_cap);
        if (err) {
-               mlx4_err(dev, "QUERY_DEV_CAP command failed, aborting.\n");
+               mlx4_err(dev, "QUERY_DEV_CAP command failed, aborting\n");
                return err;
        }
 
        if (dev_cap->min_page_sz > PAGE_SIZE) {
-               mlx4_err(dev, "HCA minimum page size of %d bigger than "
-                        "kernel PAGE_SIZE of %ld, aborting.\n",
+               mlx4_err(dev, "HCA minimum page size of %d bigger than kernel PAGE_SIZE of %ld, aborting\n",
                         dev_cap->min_page_sz, PAGE_SIZE);
                return -ENODEV;
        }
        if (dev_cap->num_ports > MLX4_MAX_PORTS) {
-               mlx4_err(dev, "HCA has %d ports, but we only support %d, "
-                        "aborting.\n",
+               mlx4_err(dev, "HCA has %d ports, but we only support %d, aborting\n",
                         dev_cap->num_ports, MLX4_MAX_PORTS);
                return -ENODEV;
        }
 
        if (dev_cap->uar_size > pci_resource_len(dev->pdev, 2)) {
-               mlx4_err(dev, "HCA reported UAR size of 0x%x bigger than "
-                        "PCI resource 2 size of 0x%llx, aborting.\n",
+               mlx4_err(dev, "HCA reported UAR size of 0x%x bigger than PCI resource 2 size of 0x%llx, aborting\n",
                         dev_cap->uar_size,
                         (unsigned long long) pci_resource_len(dev->pdev, 2));
                return -ENODEV;
@@ -347,14 +341,12 @@ static int mlx4_dev_cap(struct mlx4_dev *dev, struct mlx4_dev_cap *dev_cap)
 
                if (dev->caps.log_num_macs > dev_cap->log_max_macs[i]) {
                        dev->caps.log_num_macs = dev_cap->log_max_macs[i];
-                       mlx4_warn(dev, "Requested number of MACs is too much "
-                                 "for port %d, reducing to %d.\n",
+                       mlx4_warn(dev, "Requested number of MACs is too much for port %d, reducing to %d\n",
                                  i, 1 << dev->caps.log_num_macs);
                }
                if (dev->caps.log_num_vlans > dev_cap->log_max_vlans[i]) {
                        dev->caps.log_num_vlans = dev_cap->log_max_vlans[i];
-                       mlx4_warn(dev, "Requested number of VLANs is too much "
-                                 "for port %d, reducing to %d.\n",
+                       mlx4_warn(dev, "Requested number of VLANs is too much for port %d, reducing to %d\n",
                                  i, 1 << dev->caps.log_num_vlans);
                }
        }
@@ -584,13 +576,14 @@ static int mlx4_slave_cap(struct mlx4_dev *dev)
        memset(&hca_param, 0, sizeof(hca_param));
        err = mlx4_QUERY_HCA(dev, &hca_param);
        if (err) {
-               mlx4_err(dev, "QUERY_HCA command failed, aborting.\n");
+               mlx4_err(dev, "QUERY_HCA command failed, aborting\n");
                return err;
        }
 
-       /*fail if the hca has an unknown capability */
-       if ((hca_param.global_caps | HCA_GLOBAL_CAP_MASK) !=
-           HCA_GLOBAL_CAP_MASK) {
+       /* fail if the hca has an unknown global capability
+        * at this time global_caps should be always zeroed
+        */
+       if (hca_param.global_caps) {
                mlx4_err(dev, "Unknown hca global capabilities\n");
                return -ENOSYS;
        }
@@ -603,19 +596,18 @@ static int mlx4_slave_cap(struct mlx4_dev *dev)
        dev->caps.max_qp_dest_rdma = 1 << hca_param.log_rd_per_qp;
        err = mlx4_dev_cap(dev, &dev_cap);
        if (err) {
-               mlx4_err(dev, "QUERY_DEV_CAP command failed, aborting.\n");
+               mlx4_err(dev, "QUERY_DEV_CAP command failed, aborting\n");
                return err;
        }
 
        err = mlx4_QUERY_FW(dev);
        if (err)
-               mlx4_err(dev, "QUERY_FW command failed: could not get FW version.\n");
+               mlx4_err(dev, "QUERY_FW command failed: could not get FW version\n");
 
        page_size = ~dev->caps.page_size_cap + 1;
        mlx4_warn(dev, "HCA minimum page size:%d\n", page_size);
        if (page_size > PAGE_SIZE) {
-               mlx4_err(dev, "HCA minimum page size of %d bigger than "
-                        "kernel PAGE_SIZE of %ld, aborting.\n",
+               mlx4_err(dev, "HCA minimum page size of %d bigger than kernel PAGE_SIZE of %ld, aborting\n",
                         page_size, PAGE_SIZE);
                return -ENODEV;
        }
@@ -633,8 +625,8 @@ static int mlx4_slave_cap(struct mlx4_dev *dev)
        memset(&func_cap, 0, sizeof(func_cap));
        err = mlx4_QUERY_FUNC_CAP(dev, 0, &func_cap);
        if (err) {
-               mlx4_err(dev, "QUERY_FUNC_CAP general command failed, aborting (%d).\n",
-                         err);
+               mlx4_err(dev, "QUERY_FUNC_CAP general command failed, aborting (%d)\n",
+                        err);
                return err;
        }
 
@@ -661,8 +653,8 @@ static int mlx4_slave_cap(struct mlx4_dev *dev)
        dev->caps.num_amgms             = 0;
 
        if (dev->caps.num_ports > MLX4_MAX_PORTS) {
-               mlx4_err(dev, "HCA has %d ports, but we only support %d, "
-                        "aborting.\n", dev->caps.num_ports, MLX4_MAX_PORTS);
+               mlx4_err(dev, "HCA has %d ports, but we only support %d, aborting\n",
+                        dev->caps.num_ports, MLX4_MAX_PORTS);
                return -ENODEV;
        }
 
@@ -680,8 +672,8 @@ static int mlx4_slave_cap(struct mlx4_dev *dev)
        for (i = 1; i <= dev->caps.num_ports; ++i) {
                err = mlx4_QUERY_FUNC_CAP(dev, (u32) i, &func_cap);
                if (err) {
-                       mlx4_err(dev, "QUERY_FUNC_CAP port command failed for"
-                                " port %d, aborting (%d).\n", i, err);
+                       mlx4_err(dev, "QUERY_FUNC_CAP port command failed for port %d, aborting (%d)\n",
+                                i, err);
                        goto err_mem;
                }
                dev->caps.qp0_tunnel[i - 1] = func_cap.qp0_tunnel_qpn;
@@ -699,8 +691,7 @@ static int mlx4_slave_cap(struct mlx4_dev *dev)
        if (dev->caps.uar_page_size * (dev->caps.num_uars -
                                       dev->caps.reserved_uars) >
                                       pci_resource_len(dev->pdev, 2)) {
-               mlx4_err(dev, "HCA reported UAR region size of 0x%x bigger than "
-                        "PCI resource 2 size of 0x%llx, aborting.\n",
+               mlx4_err(dev, "HCA reported UAR region size of 0x%x bigger than PCI resource 2 size of 0x%llx, aborting\n",
                         dev->caps.uar_page_size * dev->caps.num_uars,
                         (unsigned long long) pci_resource_len(dev->pdev, 2));
                goto err_mem;
@@ -722,7 +713,7 @@ static int mlx4_slave_cap(struct mlx4_dev *dev)
        }
 
        dev->caps.flags2 &= ~MLX4_DEV_CAP_FLAG2_TS;
-       mlx4_warn(dev, "Timestamping is not supported in slave mode.\n");
+       mlx4_warn(dev, "Timestamping is not supported in slave mode\n");
 
        slave_adjust_steering_mode(dev, &dev_cap, &hca_param);
 
@@ -754,10 +745,10 @@ static void mlx4_request_modules(struct mlx4_dev *dev)
                        has_eth_port = true;
        }
 
-       if (has_ib_port || (dev->caps.flags & MLX4_DEV_CAP_FLAG_IBOE))
-               request_module_nowait(IB_DRV_NAME);
        if (has_eth_port)
                request_module_nowait(EN_DRV_NAME);
+       if (has_ib_port || (dev->caps.flags & MLX4_DEV_CAP_FLAG_IBOE))
+               request_module_nowait(IB_DRV_NAME);
 }
 
 /*
@@ -784,8 +775,8 @@ int mlx4_change_port_types(struct mlx4_dev *dev,
                        dev->caps.port_type[port] = port_types[port - 1];
                        err = mlx4_SET_PORT(dev, port, -1);
                        if (err) {
-                               mlx4_err(dev, "Failed to set port %d, "
-                                             "aborting\n", port);
+                               mlx4_err(dev, "Failed to set port %d, aborting\n",
+                                        port);
                                goto out;
                        }
                }
@@ -868,9 +859,7 @@ static ssize_t set_port_type(struct device *dev,
                }
        }
        if (err) {
-               mlx4_err(mdev, "Auto sensing is not supported on this HCA. "
-                              "Set only 'eth' or 'ib' for both ports "
-                              "(should be the same)\n");
+               mlx4_err(mdev, "Auto sensing is not supported on this HCA. Set only 'eth' or 'ib' for both ports (should be the same)\n");
                goto out;
        }
 
@@ -975,8 +964,8 @@ static ssize_t set_port_ib_mtu(struct device *dev,
                mlx4_CLOSE_PORT(mdev, port);
                err = mlx4_SET_PORT(mdev, port, -1);
                if (err) {
-                       mlx4_err(mdev, "Failed to set port %d, "
-                                     "aborting\n", port);
+                       mlx4_err(mdev, "Failed to set port %d, aborting\n",
+                                port);
                        goto err_set_port;
                }
        }
@@ -995,19 +984,19 @@ static int mlx4_load_fw(struct mlx4_dev *dev)
        priv->fw.fw_icm = mlx4_alloc_icm(dev, priv->fw.fw_pages,
                                         GFP_HIGHUSER | __GFP_NOWARN, 0);
        if (!priv->fw.fw_icm) {
-               mlx4_err(dev, "Couldn't allocate FW area, aborting.\n");
+               mlx4_err(dev, "Couldn't allocate FW area, aborting\n");
                return -ENOMEM;
        }
 
        err = mlx4_MAP_FA(dev, priv->fw.fw_icm);
        if (err) {
-               mlx4_err(dev, "MAP_FA command failed, aborting.\n");
+               mlx4_err(dev, "MAP_FA command failed, aborting\n");
                goto err_free;
        }
 
        err = mlx4_RUN_FW(dev);
        if (err) {
-               mlx4_err(dev, "RUN_FW command failed, aborting.\n");
+               mlx4_err(dev, "RUN_FW command failed, aborting\n");
                goto err_unmap_fa;
        }
 
@@ -1091,30 +1080,30 @@ static int mlx4_init_icm(struct mlx4_dev *dev, struct mlx4_dev_cap *dev_cap,
 
        err = mlx4_SET_ICM_SIZE(dev, icm_size, &aux_pages);
        if (err) {
-               mlx4_err(dev, "SET_ICM_SIZE command failed, aborting.\n");
+               mlx4_err(dev, "SET_ICM_SIZE command failed, aborting\n");
                return err;
        }
 
-       mlx4_dbg(dev, "%lld KB of HCA context requires %lld KB aux memory.\n",
+       mlx4_dbg(dev, "%lld KB of HCA context requires %lld KB aux memory\n",
                 (unsigned long long) icm_size >> 10,
                 (unsigned long long) aux_pages << 2);
 
        priv->fw.aux_icm = mlx4_alloc_icm(dev, aux_pages,
                                          GFP_HIGHUSER | __GFP_NOWARN, 0);
        if (!priv->fw.aux_icm) {
-               mlx4_err(dev, "Couldn't allocate aux memory, aborting.\n");
+               mlx4_err(dev, "Couldn't allocate aux memory, aborting\n");
                return -ENOMEM;
        }
 
        err = mlx4_MAP_ICM_AUX(dev, priv->fw.aux_icm);
        if (err) {
-               mlx4_err(dev, "MAP_ICM_AUX command failed, aborting.\n");
+               mlx4_err(dev, "MAP_ICM_AUX command failed, aborting\n");
                goto err_free_aux;
        }
 
        err = mlx4_init_cmpt_table(dev, init_hca->cmpt_base, dev_cap->cmpt_entry_sz);
        if (err) {
-               mlx4_err(dev, "Failed to map cMPT context memory, aborting.\n");
+               mlx4_err(dev, "Failed to map cMPT context memory, aborting\n");
                goto err_unmap_aux;
        }
 
@@ -1125,7 +1114,7 @@ static int mlx4_init_icm(struct mlx4_dev *dev, struct mlx4_dev_cap *dev_cap,
                                  init_hca->eqc_base, dev_cap->eqc_entry_sz,
                                  num_eqs, num_eqs, 0, 0);
        if (err) {
-               mlx4_err(dev, "Failed to map EQ context memory, aborting.\n");
+               mlx4_err(dev, "Failed to map EQ context memory, aborting\n");
                goto err_unmap_cmpt;
        }
 
@@ -1146,7 +1135,7 @@ static int mlx4_init_icm(struct mlx4_dev *dev, struct mlx4_dev_cap *dev_cap,
                                  dev->caps.num_mtts,
                                  dev->caps.reserved_mtts, 1, 0);
        if (err) {
-               mlx4_err(dev, "Failed to map MTT context memory, aborting.\n");
+               mlx4_err(dev, "Failed to map MTT context memory, aborting\n");
                goto err_unmap_eq;
        }
 
@@ -1156,7 +1145,7 @@ static int mlx4_init_icm(struct mlx4_dev *dev, struct mlx4_dev_cap *dev_cap,
                                  dev->caps.num_mpts,
                                  dev->caps.reserved_mrws, 1, 1);
        if (err) {
-               mlx4_err(dev, "Failed to map dMPT context memory, aborting.\n");
+               mlx4_err(dev, "Failed to map dMPT context memory, aborting\n");
                goto err_unmap_mtt;
        }
 
@@ -1167,7 +1156,7 @@ static int mlx4_init_icm(struct mlx4_dev *dev, struct mlx4_dev_cap *dev_cap,
                                  dev->caps.reserved_qps_cnt[MLX4_QP_REGION_FW],
                                  0, 0);
        if (err) {
-               mlx4_err(dev, "Failed to map QP context memory, aborting.\n");
+               mlx4_err(dev, "Failed to map QP context memory, aborting\n");
                goto err_unmap_dmpt;
        }
 
@@ -1178,7 +1167,7 @@ static int mlx4_init_icm(struct mlx4_dev *dev, struct mlx4_dev_cap *dev_cap,
                                  dev->caps.reserved_qps_cnt[MLX4_QP_REGION_FW],
                                  0, 0);
        if (err) {
-               mlx4_err(dev, "Failed to map AUXC context memory, aborting.\n");
+               mlx4_err(dev, "Failed to map AUXC context memory, aborting\n");
                goto err_unmap_qp;
        }
 
@@ -1189,7 +1178,7 @@ static int mlx4_init_icm(struct mlx4_dev *dev, struct mlx4_dev_cap *dev_cap,
                                  dev->caps.reserved_qps_cnt[MLX4_QP_REGION_FW],
                                  0, 0);
        if (err) {
-               mlx4_err(dev, "Failed to map ALTC context memory, aborting.\n");
+               mlx4_err(dev, "Failed to map ALTC context memory, aborting\n");
                goto err_unmap_auxc;
        }
 
@@ -1210,7 +1199,7 @@ static int mlx4_init_icm(struct mlx4_dev *dev, struct mlx4_dev_cap *dev_cap,
                                  dev->caps.num_cqs,
                                  dev->caps.reserved_cqs, 0, 0);
        if (err) {
-               mlx4_err(dev, "Failed to map CQ context memory, aborting.\n");
+               mlx4_err(dev, "Failed to map CQ context memory, aborting\n");
                goto err_unmap_rdmarc;
        }
 
@@ -1220,7 +1209,7 @@ static int mlx4_init_icm(struct mlx4_dev *dev, struct mlx4_dev_cap *dev_cap,
                                  dev->caps.num_srqs,
                                  dev->caps.reserved_srqs, 0, 0);
        if (err) {
-               mlx4_err(dev, "Failed to map SRQ context memory, aborting.\n");
+               mlx4_err(dev, "Failed to map SRQ context memory, aborting\n");
                goto err_unmap_cq;
        }
 
@@ -1238,7 +1227,7 @@ static int mlx4_init_icm(struct mlx4_dev *dev, struct mlx4_dev_cap *dev_cap,
                                  dev->caps.num_mgms + dev->caps.num_amgms,
                                  0, 0);
        if (err) {
-               mlx4_err(dev, "Failed to map MCG context memory, aborting.\n");
+               mlx4_err(dev, "Failed to map MCG context memory, aborting\n");
                goto err_unmap_srq;
        }
 
@@ -1315,7 +1304,7 @@ static void mlx4_slave_exit(struct mlx4_dev *dev)
 
        mutex_lock(&priv->cmd.slave_cmd_mutex);
        if (mlx4_comm_cmd(dev, MLX4_COMM_CMD_RESET, 0, MLX4_COMM_TIME))
-               mlx4_warn(dev, "Failed to close slave function.\n");
+               mlx4_warn(dev, "Failed to close slave function\n");
        mutex_unlock(&priv->cmd.slave_cmd_mutex);
 }
 
@@ -1413,7 +1402,7 @@ static int mlx4_init_slave(struct mlx4_dev *dev)
        u32 cmd_channel_ver;
 
        if (atomic_read(&pf_loading)) {
-               mlx4_warn(dev, "PF is not ready. Deferring probe\n");
+               mlx4_warn(dev, "PF is not ready - Deferring probe\n");
                return -EPROBE_DEFER;
        }
 
@@ -1426,8 +1415,7 @@ static int mlx4_init_slave(struct mlx4_dev *dev)
         * NUM_OF_RESET_RETRIES times before leaving.*/
        if (ret_from_reset) {
                if (MLX4_DELAY_RESET_SLAVE == ret_from_reset) {
-                       mlx4_warn(dev, "slave is currently in the "
-                                 "middle of FLR. Deferring probe.\n");
+                       mlx4_warn(dev, "slave is currently in the middle of FLR - Deferring probe\n");
                        mutex_unlock(&priv->cmd.slave_cmd_mutex);
                        return -EPROBE_DEFER;
                } else
@@ -1441,8 +1429,7 @@ static int mlx4_init_slave(struct mlx4_dev *dev)
 
        if (MLX4_COMM_GET_IF_REV(cmd_channel_ver) !=
                MLX4_COMM_GET_IF_REV(slave_read)) {
-               mlx4_err(dev, "slave driver version is not supported"
-                        " by the master\n");
+               mlx4_err(dev, "slave driver version is not supported by the master\n");
                goto err;
        }
 
@@ -1520,8 +1507,7 @@ static void choose_steering_mode(struct mlx4_dev *dev,
 
                        if (dev->caps.flags & MLX4_DEV_CAP_FLAG_VEP_UC_STEER ||
                            dev->caps.flags & MLX4_DEV_CAP_FLAG_VEP_MC_STEER)
-                               mlx4_warn(dev, "Must have both UC_STEER and MC_STEER flags "
-                                         "set to use B0 steering. Falling back to A0 steering mode.\n");
+                               mlx4_warn(dev, "Must have both UC_STEER and MC_STEER flags set to use B0 steering - falling back to A0 steering mode\n");
                }
                dev->oper_log_mgm_entry_size =
                        mlx4_log_num_mgm_entry_size > 0 ?
@@ -1529,8 +1515,7 @@ static void choose_steering_mode(struct mlx4_dev *dev,
                        MLX4_DEFAULT_MGM_LOG_ENTRY_SIZE;
                dev->caps.num_qp_per_mgm = mlx4_get_qp_per_mgm(dev);
        }
-       mlx4_dbg(dev, "Steering mode is: %s, oper_log_mgm_entry_size = %d, "
-                "modparam log_num_mgm_entry_size = %d\n",
+       mlx4_dbg(dev, "Steering mode is: %s, oper_log_mgm_entry_size = %d, modparam log_num_mgm_entry_size = %d\n",
                 mlx4_steering_mode_str(dev->caps.steering_mode),
                 dev->oper_log_mgm_entry_size,
                 mlx4_log_num_mgm_entry_size);
@@ -1564,15 +1549,15 @@ static int mlx4_init_hca(struct mlx4_dev *dev)
                err = mlx4_QUERY_FW(dev);
                if (err) {
                        if (err == -EACCES)
-                               mlx4_info(dev, "non-primary physical function, skipping.\n");
+                               mlx4_info(dev, "non-primary physical function, skipping\n");
                        else
-                               mlx4_err(dev, "QUERY_FW command failed, aborting.\n");
+                               mlx4_err(dev, "QUERY_FW command failed, aborting\n");
                        return err;
                }
 
                err = mlx4_load_fw(dev);
                if (err) {
-                       mlx4_err(dev, "Failed to start FW, aborting.\n");
+                       mlx4_err(dev, "Failed to start FW, aborting\n");
                        return err;
                }
 
@@ -1584,7 +1569,7 @@ static int mlx4_init_hca(struct mlx4_dev *dev)
 
                err = mlx4_dev_cap(dev, &dev_cap);
                if (err) {
-                       mlx4_err(dev, "QUERY_DEV_CAP command failed, aborting.\n");
+                       mlx4_err(dev, "QUERY_DEV_CAP command failed, aborting\n");
                        goto err_stop_fw;
                }
 
@@ -1625,7 +1610,7 @@ static int mlx4_init_hca(struct mlx4_dev *dev)
 
                err = mlx4_INIT_HCA(dev, &init_hca);
                if (err) {
-                       mlx4_err(dev, "INIT_HCA command failed, aborting.\n");
+                       mlx4_err(dev, "INIT_HCA command failed, aborting\n");
                        goto err_free_icm;
                }
                /*
@@ -1636,7 +1621,7 @@ static int mlx4_init_hca(struct mlx4_dev *dev)
                        memset(&init_hca, 0, sizeof(init_hca));
                        err = mlx4_QUERY_HCA(dev, &init_hca);
                        if (err) {
-                               mlx4_err(dev, "QUERY_HCA command failed, disable timestamp.\n");
+                               mlx4_err(dev, "QUERY_HCA command failed, disable timestamp\n");
                                dev->caps.flags2 &= ~MLX4_DEV_CAP_FLAG2_TS;
                        } else {
                                dev->caps.hca_core_clock =
@@ -1649,14 +1634,14 @@ static int mlx4_init_hca(struct mlx4_dev *dev)
                        if (!dev->caps.hca_core_clock) {
                                dev->caps.flags2 &= ~MLX4_DEV_CAP_FLAG2_TS;
                                mlx4_err(dev,
-                                        "HCA frequency is 0. Timestamping is not supported.");
+                                        "HCA frequency is 0 - timestamping is not supported\n");
                        } else if (map_internal_clock(dev)) {
                                /*
                                 * Map internal clock,
                                 * in case of failure disable timestamping
                                 */
                                dev->caps.flags2 &= ~MLX4_DEV_CAP_FLAG2_TS;
-                               mlx4_err(dev, "Failed to map internal clock. Timestamping is not supported.\n");
+                               mlx4_err(dev, "Failed to map internal clock. Timestamping is not supported\n");
                        }
                }
        } else {
@@ -1683,7 +1668,7 @@ static int mlx4_init_hca(struct mlx4_dev *dev)
 
        err = mlx4_QUERY_ADAPTER(dev, &adapter);
        if (err) {
-               mlx4_err(dev, "QUERY_ADAPTER command failed, aborting.\n");
+               mlx4_err(dev, "QUERY_ADAPTER command failed, aborting\n");
                goto unmap_bf;
        }
 
@@ -1793,79 +1778,69 @@ static int mlx4_setup_hca(struct mlx4_dev *dev)
 
        err = mlx4_init_uar_table(dev);
        if (err) {
-               mlx4_err(dev, "Failed to initialize "
-                        "user access region table, aborting.\n");
-               return err;
+               mlx4_err(dev, "Failed to initialize user access region table, aborting\n");
+               return err;
        }
 
        err = mlx4_uar_alloc(dev, &priv->driver_uar);
        if (err) {
-               mlx4_err(dev, "Failed to allocate driver access region, "
-                        "aborting.\n");
+               mlx4_err(dev, "Failed to allocate driver access region, aborting\n");
                goto err_uar_table_free;
        }
 
        priv->kar = ioremap((phys_addr_t) priv->driver_uar.pfn << PAGE_SHIFT, PAGE_SIZE);
        if (!priv->kar) {
-               mlx4_err(dev, "Couldn't map kernel access region, "
-                        "aborting.\n");
+               mlx4_err(dev, "Couldn't map kernel access region, aborting\n");
                err = -ENOMEM;
                goto err_uar_free;
        }
 
        err = mlx4_init_pd_table(dev);
        if (err) {
-               mlx4_err(dev, "Failed to initialize "
-                        "protection domain table, aborting.\n");
+               mlx4_err(dev, "Failed to initialize protection domain table, aborting\n");
                goto err_kar_unmap;
        }
 
        err = mlx4_init_xrcd_table(dev);
        if (err) {
-               mlx4_err(dev, "Failed to initialize "
-                        "reliable connection domain table, aborting.\n");
+               mlx4_err(dev, "Failed to initialize reliable connection domain table, aborting\n");
                goto err_pd_table_free;
        }
 
        err = mlx4_init_mr_table(dev);
        if (err) {
-               mlx4_err(dev, "Failed to initialize "
-                        "memory region table, aborting.\n");
+               mlx4_err(dev, "Failed to initialize memory region table, aborting\n");
                goto err_xrcd_table_free;
        }
 
        if (!mlx4_is_slave(dev)) {
                err = mlx4_init_mcg_table(dev);
                if (err) {
-                       mlx4_err(dev, "Failed to initialize multicast group table, aborting.\n");
+                       mlx4_err(dev, "Failed to initialize multicast group table, aborting\n");
                        goto err_mr_table_free;
                }
        }
 
        err = mlx4_init_eq_table(dev);
        if (err) {
-               mlx4_err(dev, "Failed to initialize "
-                        "event queue table, aborting.\n");
+               mlx4_err(dev, "Failed to initialize event queue table, aborting\n");
                goto err_mcg_table_free;
        }
 
        err = mlx4_cmd_use_events(dev);
        if (err) {
-               mlx4_err(dev, "Failed to switch to event-driven "
-                        "firmware commands, aborting.\n");
+               mlx4_err(dev, "Failed to switch to event-driven firmware commands, aborting\n");
                goto err_eq_table_free;
        }
 
        err = mlx4_NOP(dev);
        if (err) {
                if (dev->flags & MLX4_FLAG_MSI_X) {
-                       mlx4_warn(dev, "NOP command failed to generate MSI-X "
-                                 "interrupt IRQ %d).\n",
+                       mlx4_warn(dev, "NOP command failed to generate MSI-X interrupt IRQ %d)\n",
                                  priv->eq_table.eq[dev->caps.num_comp_vectors].irq);
-                       mlx4_warn(dev, "Trying again without MSI-X.\n");
+                       mlx4_warn(dev, "Trying again without MSI-X\n");
                } else {
-                       mlx4_err(dev, "NOP command failed to generate interrupt "
-                                "(IRQ %d), aborting.\n",
+                       mlx4_err(dev, "NOP command failed to generate interrupt (IRQ %d), aborting\n",
                                 priv->eq_table.eq[dev->caps.num_comp_vectors].irq);
                        mlx4_err(dev, "BIOS or ACPI interrupt routing problem?\n");
                }
@@ -1877,28 +1852,25 @@ static int mlx4_setup_hca(struct mlx4_dev *dev)
 
        err = mlx4_init_cq_table(dev);
        if (err) {
-               mlx4_err(dev, "Failed to initialize "
-                        "completion queue table, aborting.\n");
+               mlx4_err(dev, "Failed to initialize completion queue table, aborting\n");
                goto err_cmd_poll;
        }
 
        err = mlx4_init_srq_table(dev);
        if (err) {
-               mlx4_err(dev, "Failed to initialize "
-                        "shared receive queue table, aborting.\n");
+               mlx4_err(dev, "Failed to initialize shared receive queue table, aborting\n");
                goto err_cq_table_free;
        }
 
        err = mlx4_init_qp_table(dev);
        if (err) {
-               mlx4_err(dev, "Failed to initialize "
-                        "queue pair table, aborting.\n");
+               mlx4_err(dev, "Failed to initialize queue pair table, aborting\n");
                goto err_srq_table_free;
        }
 
        err = mlx4_init_counters_table(dev);
        if (err && err != -ENOENT) {
-               mlx4_err(dev, "Failed to initialize counters table, aborting.\n");
+               mlx4_err(dev, "Failed to initialize counters table, aborting\n");
                goto err_qp_table_free;
        }
 
@@ -1908,9 +1880,8 @@ static int mlx4_setup_hca(struct mlx4_dev *dev)
                        err = mlx4_get_port_ib_caps(dev, port,
                                                    &ib_port_default_caps);
                        if (err)
-                               mlx4_warn(dev, "failed to get port %d default "
-                                         "ib capabilities (%d). Continuing "
-                                         "with caps = 0\n", port, err);
+                               mlx4_warn(dev, "failed to get port %d default ib capabilities (%d). Continuing with caps = 0\n",
+                                         port, err);
                        dev->caps.ib_port_def_cap[port] = ib_port_default_caps;
 
                        /* initialize per-slave default ib port capabilities */
@@ -1920,7 +1891,7 @@ static int mlx4_setup_hca(struct mlx4_dev *dev)
                                        if (i == mlx4_master_func_num(dev))
                                                continue;
                                        priv->mfunc.master.slave_state[i].ib_cap_mask[port] =
-                                                       ib_port_default_caps;
+                                               ib_port_default_caps;
                                }
                        }
 
@@ -1933,7 +1904,7 @@ static int mlx4_setup_hca(struct mlx4_dev *dev)
                                            dev->caps.pkey_table_len[port] : -1);
                        if (err) {
                                mlx4_err(dev, "Failed to set port %d, aborting\n",
-                                       port);
+                                        port);
                                goto err_counters_table_free;
                        }
                }
@@ -2009,7 +1980,7 @@ static void mlx4_enable_msi_x(struct mlx4_dev *dev)
                        kfree(entries);
                        goto no_msi;
                } else if (nreq < MSIX_LEGACY_SZ +
-                                 dev->caps.num_ports * MIN_MSIX_P_PORT) {
+                          dev->caps.num_ports * MIN_MSIX_P_PORT) {
                        /*Working in legacy mode , all EQ's shared*/
                        dev->caps.comp_pool           = 0;
                        dev->caps.num_comp_vectors = nreq - 1;
@@ -2209,8 +2180,7 @@ static int __mlx4_init_one(struct pci_dev *pdev, int pci_dev_data)
 
        err = pci_enable_device(pdev);
        if (err) {
-               dev_err(&pdev->dev, "Cannot enable PCI device, "
-                       "aborting.\n");
+               dev_err(&pdev->dev, "Cannot enable PCI device, aborting\n");
                return err;
        }
 
@@ -2257,14 +2227,13 @@ static int __mlx4_init_one(struct pci_dev *pdev, int pci_dev_data)
         */
        if (!(pci_dev_data & MLX4_PCI_DEV_IS_VF) &&
            !(pci_resource_flags(pdev, 0) & IORESOURCE_MEM)) {
-               dev_err(&pdev->dev, "Missing DCS, aborting."
-                       "(driver_data: 0x%x, pci_resource_flags(pdev, 0):0x%lx)\n",
+               dev_err(&pdev->dev, "Missing DCS, aborting (driver_data: 0x%x, pci_resource_flags(pdev, 0):0x%lx)\n",
                        pci_dev_data, pci_resource_flags(pdev, 0));
                err = -ENODEV;
                goto err_disable_pdev;
        }
        if (!(pci_resource_flags(pdev, 2) & IORESOURCE_MEM)) {
-               dev_err(&pdev->dev, "Missing UAR, aborting.\n");
+               dev_err(&pdev->dev, "Missing UAR, aborting\n");
                err = -ENODEV;
                goto err_disable_pdev;
        }
@@ -2279,21 +2248,19 @@ static int __mlx4_init_one(struct pci_dev *pdev, int pci_dev_data)
 
        err = pci_set_dma_mask(pdev, DMA_BIT_MASK(64));
        if (err) {
-               dev_warn(&pdev->dev, "Warning: couldn't set 64-bit PCI DMA mask.\n");
+               dev_warn(&pdev->dev, "Warning: couldn't set 64-bit PCI DMA mask\n");
                err = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
                if (err) {
-                       dev_err(&pdev->dev, "Can't set PCI DMA mask, aborting.\n");
+                       dev_err(&pdev->dev, "Can't set PCI DMA mask, aborting\n");
                        goto err_release_regions;
                }
        }
        err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64));
        if (err) {
-               dev_warn(&pdev->dev, "Warning: couldn't set 64-bit "
-                        "consistent PCI DMA mask.\n");
+               dev_warn(&pdev->dev, "Warning: couldn't set 64-bit consistent PCI DMA mask\n");
                err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32));
                if (err) {
-                       dev_err(&pdev->dev, "Can't set consistent PCI DMA mask, "
-                               "aborting.\n");
+                       dev_err(&pdev->dev, "Can't set consistent PCI DMA mask, aborting\n");
                        goto err_release_regions;
                }
        }
@@ -2324,7 +2291,7 @@ static int __mlx4_init_one(struct pci_dev *pdev, int pci_dev_data)
                if (total_vfs) {
                        unsigned vfs_offset = 0;
                        for (i = 0; i < sizeof(nvfs)/sizeof(nvfs[0]) &&
-                            vfs_offset + nvfs[i] < extended_func_num(pdev);
+                                    vfs_offset + nvfs[i] < extended_func_num(pdev);
                             vfs_offset += nvfs[i], i++)
                                ;
                        if (i == sizeof(nvfs)/sizeof(nvfs[0])) {
@@ -2350,8 +2317,7 @@ static int __mlx4_init_one(struct pci_dev *pdev, int pci_dev_data)
                        if (err < 0)
                                goto err_free_dev;
                        else {
-                               mlx4_warn(dev, "Multiple PFs not yet supported."
-                                         " Skipping PF.\n");
+                               mlx4_warn(dev, "Multiple PFs not yet supported - Skipping PF\n");
                                err = -EINVAL;
                                goto err_free_dev;
                        }
@@ -2361,8 +2327,8 @@ static int __mlx4_init_one(struct pci_dev *pdev, int pci_dev_data)
                        mlx4_warn(dev, "Enabling SR-IOV with %d VFs\n",
                                  total_vfs);
                        dev->dev_vfs = kzalloc(
-                                       total_vfs * sizeof(*dev->dev_vfs),
-                                       GFP_KERNEL);
+                               total_vfs * sizeof(*dev->dev_vfs),
+                               GFP_KERNEL);
                        if (NULL == dev->dev_vfs) {
                                mlx4_err(dev, "Failed to allocate memory for VFs\n");
                                err = 0;
@@ -2370,14 +2336,14 @@ static int __mlx4_init_one(struct pci_dev *pdev, int pci_dev_data)
                                atomic_inc(&pf_loading);
                                err = pci_enable_sriov(pdev, total_vfs);
                                if (err) {
-                                       mlx4_err(dev, "Failed to enable SR-IOV, continuing without SR-IOV (err = %d).\n",
+                                       mlx4_err(dev, "Failed to enable SR-IOV, continuing without SR-IOV (err = %d)\n",
                                                 err);
                                        atomic_dec(&pf_loading);
                                        err = 0;
                                } else {
                                        mlx4_warn(dev, "Running in master mode\n");
                                        dev->flags |= MLX4_FLAG_SRIOV |
-                                                     MLX4_FLAG_MASTER;
+                                               MLX4_FLAG_MASTER;
                                        dev->num_vfs = total_vfs;
                                        sriov_initialized = 1;
                                }
@@ -2394,7 +2360,7 @@ static int __mlx4_init_one(struct pci_dev *pdev, int pci_dev_data)
                 */
                err = mlx4_reset(dev);
                if (err) {
-                       mlx4_err(dev, "Failed to reset HCA, aborting.\n");
+                       mlx4_err(dev, "Failed to reset HCA, aborting\n");
                        goto err_rel_own;
                }
        }
@@ -2402,7 +2368,7 @@ static int __mlx4_init_one(struct pci_dev *pdev, int pci_dev_data)
 slave_start:
        err = mlx4_cmd_init(dev);
        if (err) {
-               mlx4_err(dev, "Failed to init command interface, aborting.\n");
+               mlx4_err(dev, "Failed to init command interface, aborting\n");
                goto err_sriov;
        }
 
@@ -2416,8 +2382,7 @@ slave_start:
                        dev->num_slaves = 0;
                        err = mlx4_multi_func_init(dev);
                        if (err) {
-                               mlx4_err(dev, "Failed to init slave mfunc"
-                                        " interface, aborting.\n");
+                               mlx4_err(dev, "Failed to init slave mfunc interface, aborting\n");
                                goto err_cmd;
                        }
                }
@@ -2440,7 +2405,8 @@ slave_start:
         * No return code for this call, just warn the user in case of PCI
         * express device capabilities are under-satisfied by the bus.
         */
-       mlx4_check_pcie_caps(dev);
+       if (!mlx4_is_slave(dev))
+               mlx4_check_pcie_caps(dev);
 
        /* In master functions, the communication channel must be initialized
         * after obtaining its address from fw */
@@ -2448,8 +2414,7 @@ slave_start:
                unsigned sum = 0;
                err = mlx4_multi_func_init(dev);
                if (err) {
-                       mlx4_err(dev, "Failed to init master mfunc"
-                                "interface, aborting.\n");
+                       mlx4_err(dev, "Failed to init master mfunc interface, aborting\n");
                        goto err_close;
                }
                if (sriov_initialized) {
@@ -2460,10 +2425,7 @@ slave_start:
                        if (ib_ports &&
                            (num_vfs_argc > 1 || probe_vfs_argc > 1)) {
                                mlx4_err(dev,
-                                        "Invalid syntax of num_vfs/probe_vfs "
-                                        "with IB port. Single port VFs syntax"
-                                        " is only supported when all ports "
-                                        "are configured as ethernet\n");
+                                        "Invalid syntax of num_vfs/probe_vfs with IB port - single port VFs syntax is only supported when all ports are configured as ethernet\n");
                                goto err_close;
                        }
                        for (i = 0; i < sizeof(nvfs)/sizeof(nvfs[0]); i++) {
@@ -2489,8 +2451,7 @@ slave_start:
        if ((mlx4_is_mfunc(dev)) &&
            !(dev->flags & MLX4_FLAG_MSI_X)) {
                err = -ENOSYS;
-               mlx4_err(dev, "INTx is not supported in multi-function mode."
-                        " aborting.\n");
+               mlx4_err(dev, "INTx is not supported in multi-function mode, aborting\n");
                goto err_free_eq;
        }
 
@@ -2828,11 +2789,10 @@ static int __init mlx4_verify_params(void)
        if (mlx4_log_num_mgm_entry_size != -1 &&
            (mlx4_log_num_mgm_entry_size < MLX4_MIN_MGM_LOG_ENTRY_SIZE ||
             mlx4_log_num_mgm_entry_size > MLX4_MAX_MGM_LOG_ENTRY_SIZE)) {
-               pr_warning("mlx4_core: mlx4_log_num_mgm_entry_size (%d) not "
-                          "in legal range (-1 or %d..%d)\n",
-                          mlx4_log_num_mgm_entry_size,
-                          MLX4_MIN_MGM_LOG_ENTRY_SIZE,
-                          MLX4_MAX_MGM_LOG_ENTRY_SIZE);
+               pr_warn("mlx4_core: mlx4_log_num_mgm_entry_size (%d) not in legal range (-1 or %d..%d)\n",
+                       mlx4_log_num_mgm_entry_size,
+                       MLX4_MIN_MGM_LOG_ENTRY_SIZE,
+                       MLX4_MAX_MGM_LOG_ENTRY_SIZE);
                return -1;
        }
 
index 80ccb4edf825f8888c6487626f2f380c7b27479b..4c36def8e10f9b518a1ba8a3f05340eb63c4dc0a 100644 (file)
@@ -638,7 +638,7 @@ static int find_entry(struct mlx4_dev *dev, u8 port,
 
                if (!(be32_to_cpu(mgm->members_count) & 0xffffff)) {
                        if (*index != hash) {
-                               mlx4_err(dev, "Found zero MGID in AMGM.\n");
+                               mlx4_err(dev, "Found zero MGID in AMGM\n");
                                err = -EINVAL;
                        }
                        return err;
@@ -874,7 +874,7 @@ static void mlx4_err_rule(struct mlx4_dev *dev, char *str,
        mlx4_err(dev, "%s", buf);
 
        if (len >= BUF_SIZE)
-               mlx4_err(dev, "Network rule error message was truncated, print buffer is too small.\n");
+               mlx4_err(dev, "Network rule error message was truncated, print buffer is too small\n");
 }
 
 int mlx4_flow_attach(struct mlx4_dev *dev,
@@ -897,7 +897,7 @@ int mlx4_flow_attach(struct mlx4_dev *dev,
                ret = parse_trans_rule(dev, cur, mailbox->buf + size);
                if (ret < 0) {
                        mlx4_free_cmd_mailbox(dev, mailbox);
-                       return -EINVAL;
+                       return ret;
                }
                size += ret;
        }
@@ -905,10 +905,10 @@ int mlx4_flow_attach(struct mlx4_dev *dev,
        ret = mlx4_QP_FLOW_STEERING_ATTACH(dev, mailbox, size >> 2, reg_id);
        if (ret == -ENOMEM)
                mlx4_err_rule(dev,
-                             "mcg table is full. Fail to register network rule.\n",
+                             "mcg table is full. Fail to register network rule\n",
                              rule);
        else if (ret)
-               mlx4_err_rule(dev, "Fail to register network rule.\n", rule);
+               mlx4_err_rule(dev, "Fail to register network rule\n", rule);
 
        mlx4_free_cmd_mailbox(dev, mailbox);
 
@@ -994,7 +994,7 @@ int mlx4_qp_attach_common(struct mlx4_dev *dev, struct mlx4_qp *qp, u8 gid[16],
 
        members_count = be32_to_cpu(mgm->members_count) & 0xffffff;
        if (members_count == dev->caps.num_qp_per_mgm) {
-               mlx4_err(dev, "MGM at index %x is full.\n", index);
+               mlx4_err(dev, "MGM at index %x is full\n", index);
                err = -ENOMEM;
                goto out;
        }
@@ -1042,7 +1042,7 @@ out:
        }
        if (err && link && index != -1) {
                if (index < dev->caps.num_mgms)
-                       mlx4_warn(dev, "Got AMGM index %d < %d",
+                       mlx4_warn(dev, "Got AMGM index %d < %d\n",
                                  index, dev->caps.num_mgms);
                else
                        mlx4_bitmap_free(&priv->mcg_table.bitmap,
@@ -1133,7 +1133,7 @@ int mlx4_qp_detach_common(struct mlx4_dev *dev, struct mlx4_qp *qp, u8 gid[16],
 
                if (amgm_index) {
                        if (amgm_index < dev->caps.num_mgms)
-                               mlx4_warn(dev, "MGM entry %d had AMGM index %d < %d",
+                               mlx4_warn(dev, "MGM entry %d had AMGM index %d < %d\n",
                                          index, amgm_index, dev->caps.num_mgms);
                        else
                                mlx4_bitmap_free(&priv->mcg_table.bitmap,
@@ -1153,7 +1153,7 @@ int mlx4_qp_detach_common(struct mlx4_dev *dev, struct mlx4_qp *qp, u8 gid[16],
                        goto out;
 
                if (index < dev->caps.num_mgms)
-                       mlx4_warn(dev, "entry %d had next AMGM index %d < %d",
+                       mlx4_warn(dev, "entry %d had next AMGM index %d < %d\n",
                                  prev, index, dev->caps.num_mgms);
                else
                        mlx4_bitmap_free(&priv->mcg_table.bitmap,
index f9c46510196341a6089b0a23d7b53455dad69ae5..52c1e7da74c4035c36aea715b968afd0ff478ce4 100644 (file)
@@ -216,18 +216,19 @@ extern int mlx4_debug_level;
 #define mlx4_debug_level       (0)
 #endif /* CONFIG_MLX4_DEBUG */
 
-#define mlx4_dbg(mdev, format, arg...)                                 \
+#define mlx4_dbg(mdev, format, ...)                                    \
 do {                                                                   \
        if (mlx4_debug_level)                                           \
-               dev_printk(KERN_DEBUG, &mdev->pdev->dev, format, ##arg); \
+               dev_printk(KERN_DEBUG, &(mdev)->pdev->dev, format,      \
+                          ##__VA_ARGS__);                              \
 } while (0)
 
-#define mlx4_err(mdev, format, arg...) \
-       dev_err(&mdev->pdev->dev, format, ##arg)
-#define mlx4_info(mdev, format, arg...) \
-       dev_info(&mdev->pdev->dev, format, ##arg)
-#define mlx4_warn(mdev, format, arg...) \
-       dev_warn(&mdev->pdev->dev, format, ##arg)
+#define mlx4_err(mdev, format, ...)                                    \
+       dev_err(&(mdev)->pdev->dev, format, ##__VA_ARGS__)
+#define mlx4_info(mdev, format, ...)                                   \
+       dev_info(&(mdev)->pdev->dev, format, ##__VA_ARGS__)
+#define mlx4_warn(mdev, format, ...)                                   \
+       dev_warn(&(mdev)->pdev->dev, format, ##__VA_ARGS__)
 
 extern int mlx4_log_num_mgm_entry_size;
 extern int log_mtts_per_seg;
index 04d9b6fe3e8000fdb14714b09770b11aa46079e8..b5db1bf361dc6adac67dbade22f04ab03601956c 100644 (file)
@@ -830,26 +830,26 @@ __printf(3, 4)
 int en_print(const char *level, const struct mlx4_en_priv *priv,
             const char *format, ...);
 
-#define en_dbg(mlevel, priv, format, arg...)                   \
-do {                                                           \
-       if (NETIF_MSG_##mlevel & priv->msg_enable)              \
-               en_print(KERN_DEBUG, priv, format, ##arg);      \
+#define en_dbg(mlevel, priv, format, ...)                              \
+do {                                                                   \
+       if (NETIF_MSG_##mlevel & (priv)->msg_enable)                    \
+               en_print(KERN_DEBUG, priv, format, ##__VA_ARGS__);      \
 } while (0)
-#define en_warn(priv, format, arg...)                  \
-       en_print(KERN_WARNING, priv, format, ##arg)
-#define en_err(priv, format, arg...)                   \
-       en_print(KERN_ERR, priv, format, ##arg)
-#define en_info(priv, format, arg...)                  \
-       en_print(KERN_INFO, priv, format, ## arg)
-
-#define mlx4_err(mdev, format, arg...)                 \
-       pr_err("%s %s: " format, DRV_NAME,              \
-              dev_name(&mdev->pdev->dev), ##arg)
-#define mlx4_info(mdev, format, arg...)                        \
-       pr_info("%s %s: " format, DRV_NAME,             \
-               dev_name(&mdev->pdev->dev), ##arg)
-#define mlx4_warn(mdev, format, arg...)                        \
-       pr_warning("%s %s: " format, DRV_NAME,          \
-                  dev_name(&mdev->pdev->dev), ##arg)
+#define en_warn(priv, format, ...)                                     \
+       en_print(KERN_WARNING, priv, format, ##__VA_ARGS__)
+#define en_err(priv, format, ...)                                      \
+       en_print(KERN_ERR, priv, format, ##__VA_ARGS__)
+#define en_info(priv, format, ...)                                     \
+       en_print(KERN_INFO, priv, format, ##__VA_ARGS__)
+
+#define mlx4_err(mdev, format, ...)                                    \
+       pr_err(DRV_NAME " %s: " format,                                 \
+              dev_name(&(mdev)->pdev->dev), ##__VA_ARGS__)
+#define mlx4_info(mdev, format, ...)                                   \
+       pr_info(DRV_NAME " %s: " format,                                \
+               dev_name(&(mdev)->pdev->dev), ##__VA_ARGS__)
+#define mlx4_warn(mdev, format, ...)                                   \
+       pr_warn(DRV_NAME " %s: " format,                                \
+               dev_name(&(mdev)->pdev->dev), ##__VA_ARGS__)
 
 #endif
index 24835853b7533ec7bf9f73c05e8a7cd713414956..64fb3e6431a07a50c1ede317b9d68410faa920d1 100644 (file)
@@ -250,8 +250,8 @@ static void mlx4_free_mtt_range(struct mlx4_dev *dev, u32 offset, int order)
                                                       MLX4_CMD_TIME_CLASS_A,
                                                       MLX4_CMD_WRAPPED);
                if (err)
-                       mlx4_warn(dev, "Failed to free mtt range at:"
-                                 "%d order:%d\n", offset, order);
+                       mlx4_warn(dev, "Failed to free mtt range at:%d order:%d\n",
+                                 offset, order);
                return;
        }
         __mlx4_free_mtt_range(dev, offset, order);
@@ -436,8 +436,8 @@ static int mlx4_mr_free_reserved(struct mlx4_dev *dev, struct mlx4_mr *mr)
                                     key_to_hw_index(mr->key) &
                                     (dev->caps.num_mpts - 1));
                if (err) {
-                       mlx4_warn(dev, "HW2SW_MPT failed (%d),", err);
-                       mlx4_warn(dev, "MR has MWs bound to it.\n");
+                       mlx4_warn(dev, "HW2SW_MPT failed (%d), MR has MWs bound to it\n",
+                                 err);
                        return err;
                }
 
@@ -773,7 +773,7 @@ int mlx4_init_mr_table(struct mlx4_dev *dev)
                        mlx4_alloc_mtt_range(dev,
                                             fls(dev->caps.reserved_mtts - 1));
                if (priv->reserved_mtts < 0) {
-                       mlx4_warn(dev, "MTT table of order %u is too small.\n",
+                       mlx4_warn(dev, "MTT table of order %u is too small\n",
                                  mr_table->mtt_buddy.max_order);
                        err = -ENOMEM;
                        goto err_reserve_mtts;
@@ -954,8 +954,8 @@ void mlx4_fmr_unmap(struct mlx4_dev *dev, struct mlx4_fmr *fmr,
        mailbox = mlx4_alloc_cmd_mailbox(dev);
        if (IS_ERR(mailbox)) {
                err = PTR_ERR(mailbox);
-               printk(KERN_WARNING "mlx4_ib: mlx4_alloc_cmd_mailbox"
-                      " failed (%d)\n", err);
+               printk(KERN_WARNING "mlx4_ib: mlx4_alloc_cmd_mailbox failed (%d)\n",
+                      err);
                return;
        }
 
index cfcad26ed40f60b0e5b992195339d8c12c0e68d7..376f2f1d445ea3828ba3dee3f2eb214182d3a2ca 100644 (file)
@@ -244,8 +244,8 @@ void __mlx4_unregister_mac(struct mlx4_dev *dev, u8 port, u64 mac)
        if (validate_index(dev, table, index))
                goto out;
        if (--table->refs[index]) {
-               mlx4_dbg(dev, "Have more references for index %d,"
-                        "no need to modify mac table\n", index);
+               mlx4_dbg(dev, "Have more references for index %d, no need to modify mac table\n",
+                        index);
                goto out;
        }
 
@@ -443,9 +443,8 @@ void __mlx4_unregister_vlan(struct mlx4_dev *dev, u8 port, u16 vlan)
        }
 
        if (--table->refs[index]) {
-               mlx4_dbg(dev, "Have %d more references for index %d,"
-                        "no need to modify vlan table\n", table->refs[index],
-                        index);
+               mlx4_dbg(dev, "Have %d more references for index %d, no need to modify vlan table\n",
+                        table->refs[index], index);
                goto out;
        }
        table->entries[index] = 0;
@@ -706,8 +705,7 @@ static int mlx4_common_set_port(struct mlx4_dev *dev, int slave, u32 in_mod,
                                        if (!memcmp(gid_entry_mbox->raw, gid_entry_tbl->raw,
                                                    sizeof(gid_entry_tbl->raw))) {
                                                /* found duplicate */
-                                               mlx4_warn(dev, "requested gid entry for slave:%d "
-                                                         "is a duplicate of gid at index %d\n",
+                                               mlx4_warn(dev, "requested gid entry for slave:%d is a duplicate of gid at index %d\n",
                                                          slave, i);
                                                return -EINVAL;
                                        }
@@ -1106,6 +1104,9 @@ int mlx4_get_slave_from_roce_gid(struct mlx4_dev *dev, int port, u8 *gid,
        }
 
        if (found_ix >= 0) {
+               /* Calculate a slave_gid which is the slave number in the gid
+                * table and not a globally unique slave number.
+                */
                if (found_ix < MLX4_ROCE_PF_GIDS)
                        slave_gid = 0;
                else if (found_ix < MLX4_ROCE_PF_GIDS + (vf_gids % num_vfs) *
@@ -1118,41 +1119,43 @@ int mlx4_get_slave_from_roce_gid(struct mlx4_dev *dev, int port, u8 *gid,
                          ((vf_gids % num_vfs) * ((vf_gids / num_vfs + 1)))) /
                         (vf_gids / num_vfs)) + vf_gids % num_vfs + 1;
 
+               /* Calculate the globally unique slave id */
                if (slave_gid) {
                        struct mlx4_active_ports exclusive_ports;
                        struct mlx4_active_ports actv_ports;
                        struct mlx4_slaves_pport slaves_pport_actv;
                        unsigned max_port_p_one;
-                       int num_slaves_before = 1;
+                       int num_vfs_before = 0;
+                       int candidate_slave_gid;
 
+                       /* Calculate how many VFs are on the previous port, if exists */
                        for (i = 1; i < port; i++) {
                                bitmap_zero(exclusive_ports.ports, dev->caps.num_ports);
-                               set_bit(i, exclusive_ports.ports);
+                               set_bit(i - 1, exclusive_ports.ports);
                                slaves_pport_actv =
                                        mlx4_phys_to_slaves_pport_actv(
                                                        dev, &exclusive_ports);
-                               num_slaves_before += bitmap_weight(
+                               num_vfs_before += bitmap_weight(
                                                slaves_pport_actv.slaves,
                                                dev->num_vfs + 1);
                        }
 
-                       if (slave_gid < num_slaves_before) {
-                               bitmap_zero(exclusive_ports.ports, dev->caps.num_ports);
-                               set_bit(port - 1, exclusive_ports.ports);
-                               slaves_pport_actv =
-                                       mlx4_phys_to_slaves_pport_actv(
-                                                       dev, &exclusive_ports);
-                               slave_gid += bitmap_weight(
-                                               slaves_pport_actv.slaves,
-                                               dev->num_vfs + 1) -
-                                               num_slaves_before;
-                       }
-                       actv_ports = mlx4_get_active_ports(dev, slave_gid);
+                       /* candidate_slave_gid isn't necessarily the correct slave, but
+                        * it has the same number of ports and is assigned to the same
+                        * ports as the real slave we're looking for. On dual port VF,
+                        * slave_gid = [single port VFs on port <port>] +
+                        * [offset of the current slave from the first dual port VF] +
+                        * 1 (for the PF).
+                        */
+                       candidate_slave_gid = slave_gid + num_vfs_before;
+
+                       actv_ports = mlx4_get_active_ports(dev, candidate_slave_gid);
                        max_port_p_one = find_first_bit(
                                actv_ports.ports, dev->caps.num_ports) +
                                bitmap_weight(actv_ports.ports,
                                              dev->caps.num_ports) + 1;
 
+                       /* Calculate the real slave number */
                        for (i = 1; i < max_port_p_one; i++) {
                                if (i == port)
                                        continue;
index 8e0c3cc2a1ec786739de7298fc450c483d8fe37a..14089d9e1667fcc4287fe08ca34590e279f66561 100644 (file)
@@ -164,18 +164,17 @@ u64 mlx4_make_profile(struct mlx4_dev *dev,
                }
 
                if (total_size > dev_cap->max_icm_sz) {
-                       mlx4_err(dev, "Profile requires 0x%llx bytes; "
-                                 "won't fit in 0x%llx bytes of context memory.\n",
-                                 (unsigned long long) total_size,
-                                 (unsigned long long) dev_cap->max_icm_sz);
+                       mlx4_err(dev, "Profile requires 0x%llx bytes; won't fit in 0x%llx bytes of context memory\n",
+                                (unsigned long long) total_size,
+                                (unsigned long long) dev_cap->max_icm_sz);
                        kfree(profile);
                        return -ENOMEM;
                }
 
                if (profile[i].size)
-                       mlx4_dbg(dev, "  profile[%2d] (%6s): 2^%02d entries @ 0x%10llx, "
-                                 "size 0x%10llx\n",
-                                i, res_name[profile[i].type], profile[i].log_num,
+                       mlx4_dbg(dev, "  profile[%2d] (%6s): 2^%02d entries @ 0x%10llx, size 0x%10llx\n",
+                                i, res_name[profile[i].type],
+                                profile[i].log_num,
                                 (unsigned long long) profile[i].start,
                                 (unsigned long long) profile[i].size);
        }
index 61d64ebffd56e64b0fa8bf2d0fb69308e3d02c49..9bdb6aeb37218dc4e9b53a066bf28b1522c20d33 100644 (file)
@@ -264,8 +264,8 @@ void mlx4_qp_release_range(struct mlx4_dev *dev, int base_qpn, int cnt)
                               MLX4_CMD_FREE_RES,
                               MLX4_CMD_TIME_CLASS_A, MLX4_CMD_WRAPPED);
                if (err) {
-                       mlx4_warn(dev, "Failed to release qp range"
-                                 " base:%d cnt:%d\n", base_qpn, cnt);
+                       mlx4_warn(dev, "Failed to release qp range base:%d cnt:%d\n",
+                                 base_qpn, cnt);
                }
        } else
                 __mlx4_qp_release_range(dev, base_qpn, cnt);
@@ -577,8 +577,7 @@ int mlx4_qp_to_ready(struct mlx4_dev *dev, struct mlx4_mtt *mtt,
                err = mlx4_qp_modify(dev, mtt, states[i], states[i + 1],
                                     context, 0, 0, qp);
                if (err) {
-                       mlx4_err(dev, "Failed to bring QP to state: "
-                                "%d with error: %d\n",
+                       mlx4_err(dev, "Failed to bring QP to state: %d with error: %d\n",
                                 states[i + 1], err);
                        return err;
                }
index dd1b5093d8b170812451fb3f67158e1bb8e2b908..ea1c6d092145a5d7e150e8549577edc87f3b0472 100644 (file)
@@ -72,8 +72,7 @@ int mlx4_reset(struct mlx4_dev *dev)
        hca_header = kmalloc(256, GFP_KERNEL);
        if (!hca_header) {
                err = -ENOMEM;
-               mlx4_err(dev, "Couldn't allocate memory to save HCA "
-                         "PCI header, aborting.\n");
+               mlx4_err(dev, "Couldn't allocate memory to save HCA PCI header, aborting\n");
                goto out;
        }
 
@@ -84,8 +83,7 @@ int mlx4_reset(struct mlx4_dev *dev)
                        continue;
                if (pci_read_config_dword(dev->pdev, i * 4, hca_header + i)) {
                        err = -ENODEV;
-                       mlx4_err(dev, "Couldn't save HCA "
-                                 "PCI header, aborting.\n");
+                       mlx4_err(dev, "Couldn't save HCA PCI header, aborting\n");
                        goto out;
                }
        }
@@ -94,7 +92,7 @@ int mlx4_reset(struct mlx4_dev *dev)
                        MLX4_RESET_SIZE);
        if (!reset) {
                err = -ENOMEM;
-               mlx4_err(dev, "Couldn't map HCA reset register, aborting.\n");
+               mlx4_err(dev, "Couldn't map HCA reset register, aborting\n");
                goto out;
        }
 
@@ -133,8 +131,7 @@ int mlx4_reset(struct mlx4_dev *dev)
 
        if (vendor == 0xffff) {
                err = -ENODEV;
-               mlx4_err(dev, "PCI device did not come back after reset, "
-                         "aborting.\n");
+               mlx4_err(dev, "PCI device did not come back after reset, aborting\n");
                goto out;
        }
 
@@ -144,16 +141,14 @@ int mlx4_reset(struct mlx4_dev *dev)
                if (pcie_capability_write_word(dev->pdev, PCI_EXP_DEVCTL,
                                               devctl)) {
                        err = -ENODEV;
-                       mlx4_err(dev, "Couldn't restore HCA PCI Express "
-                                "Device Control register, aborting.\n");
+                       mlx4_err(dev, "Couldn't restore HCA PCI Express Device Control register, aborting\n");
                        goto out;
                }
                linkctl = hca_header[(pcie_cap + PCI_EXP_LNKCTL) / 4];
                if (pcie_capability_write_word(dev->pdev, PCI_EXP_LNKCTL,
                                               linkctl)) {
                        err = -ENODEV;
-                       mlx4_err(dev, "Couldn't restore HCA PCI Express "
-                                "Link control register, aborting.\n");
+                       mlx4_err(dev, "Couldn't restore HCA PCI Express Link control register, aborting\n");
                        goto out;
                }
        }
@@ -164,8 +159,8 @@ int mlx4_reset(struct mlx4_dev *dev)
 
                if (pci_write_config_dword(dev->pdev, i * 4, hca_header[i])) {
                        err = -ENODEV;
-                       mlx4_err(dev, "Couldn't restore HCA reg %x, "
-                                 "aborting.\n", i);
+                       mlx4_err(dev, "Couldn't restore HCA reg %x, aborting\n",
+                                i);
                        goto out;
                }
        }
@@ -173,8 +168,7 @@ int mlx4_reset(struct mlx4_dev *dev)
        if (pci_write_config_dword(dev->pdev, PCI_COMMAND,
                                   hca_header[PCI_COMMAND / 4])) {
                err = -ENODEV;
-               mlx4_err(dev, "Couldn't restore HCA COMMAND, "
-                         "aborting.\n");
+               mlx4_err(dev, "Couldn't restore HCA COMMAND, aborting\n");
                goto out;
        }
 
index 3b5f53ef29b292d6edcb027f6b64b9c108a3a03b..a95df9d2645d99c88a4d0ae7857b5a8ef328e414 100644 (file)
@@ -3733,6 +3733,25 @@ static int qp_detach(struct mlx4_dev *dev, struct mlx4_qp *qp,
        }
 }
 
+static int mlx4_adjust_port(struct mlx4_dev *dev, int slave,
+                           u8 *gid, enum mlx4_protocol prot)
+{
+       int real_port;
+
+       if (prot != MLX4_PROT_ETH)
+               return 0;
+
+       if (dev->caps.steering_mode == MLX4_STEERING_MODE_B0 ||
+           dev->caps.steering_mode == MLX4_STEERING_MODE_DEVICE_MANAGED) {
+               real_port = mlx4_slave_convert_port(dev, slave, gid[5]);
+               if (real_port < 0)
+                       return -EINVAL;
+               gid[5] = real_port;
+       }
+
+       return 0;
+}
+
 int mlx4_QP_ATTACH_wrapper(struct mlx4_dev *dev, int slave,
                               struct mlx4_vhcr *vhcr,
                               struct mlx4_cmd_mailbox *inbox,
@@ -3768,6 +3787,10 @@ int mlx4_QP_ATTACH_wrapper(struct mlx4_dev *dev, int slave,
                if (err)
                        goto ex_detach;
        } else {
+               err = mlx4_adjust_port(dev, slave, gid, prot);
+               if (err)
+                       goto ex_put;
+
                err = rem_mcg_res(dev, slave, rqp, gid, prot, type, &reg_id);
                if (err)
                        goto ex_put;
@@ -3857,7 +3880,7 @@ static int add_eth_header(struct mlx4_dev *dev, int slave,
                }
        }
        if (!be_mac) {
-               pr_err("Failed adding eth header to FS rule, Can't find matching MAC for port %d .\n",
+               pr_err("Failed adding eth header to FS rule, Can't find matching MAC for port %d\n",
                       port);
                return -EINVAL;
        }
@@ -3900,7 +3923,7 @@ int mlx4_QP_FLOW_STEERING_ATTACH_wrapper(struct mlx4_dev *dev, int slave,
        qpn = be32_to_cpu(ctrl->qpn) & 0xffffff;
        err = get_res(dev, slave, qpn, RES_QP, &rqp);
        if (err) {
-               pr_err("Steering rule with qpn 0x%x rejected.\n", qpn);
+               pr_err("Steering rule with qpn 0x%x rejected\n", qpn);
                return err;
        }
        rule_header = (struct _rule_hw *)(ctrl + 1);
@@ -3918,7 +3941,7 @@ int mlx4_QP_FLOW_STEERING_ATTACH_wrapper(struct mlx4_dev *dev, int slave,
        case MLX4_NET_TRANS_RULE_ID_IPV4:
        case MLX4_NET_TRANS_RULE_ID_TCP:
        case MLX4_NET_TRANS_RULE_ID_UDP:
-               pr_warn("Can't attach FS rule without L2 headers, adding L2 header.\n");
+               pr_warn("Can't attach FS rule without L2 headers, adding L2 header\n");
                if (add_eth_header(dev, slave, inbox, rlist, header_id)) {
                        err = -EINVAL;
                        goto err_put;
@@ -3927,7 +3950,7 @@ int mlx4_QP_FLOW_STEERING_ATTACH_wrapper(struct mlx4_dev *dev, int slave,
                        sizeof(struct mlx4_net_trans_rule_hw_eth) >> 2;
                break;
        default:
-               pr_err("Corrupted mailbox.\n");
+               pr_err("Corrupted mailbox\n");
                err = -EINVAL;
                goto err_put;
        }
@@ -3941,7 +3964,7 @@ int mlx4_QP_FLOW_STEERING_ATTACH_wrapper(struct mlx4_dev *dev, int slave,
 
        err = add_res_range(dev, slave, vhcr->out_param, 1, RES_FS_RULE, qpn);
        if (err) {
-               mlx4_err(dev, "Fail to add flow steering resources.\n ");
+               mlx4_err(dev, "Fail to add flow steering resources\n");
                /* detach rule*/
                mlx4_cmd(dev, vhcr->out_param, 0, 0,
                         MLX4_QP_FLOW_STEERING_DETACH, MLX4_CMD_TIME_CLASS_A,
@@ -3979,7 +4002,7 @@ int mlx4_QP_FLOW_STEERING_DETACH_wrapper(struct mlx4_dev *dev, int slave,
 
        err = rem_res_range(dev, slave, vhcr->in_param, 1, RES_FS_RULE, 0);
        if (err) {
-               mlx4_err(dev, "Fail to remove flow steering resources.\n ");
+               mlx4_err(dev, "Fail to remove flow steering resources\n");
                goto out;
        }
 
@@ -4108,8 +4131,8 @@ static void rem_slave_qps(struct mlx4_dev *dev, int slave)
 
        err = move_all_busy(dev, slave, RES_QP);
        if (err)
-               mlx4_warn(dev, "rem_slave_qps: Could not move all qps to busy"
-                         "for slave %d\n", slave);
+               mlx4_warn(dev, "rem_slave_qps: Could not move all qps to busy for slave %d\n",
+                         slave);
 
        spin_lock_irq(mlx4_tlock(dev));
        list_for_each_entry_safe(qp, tmp, qp_list, com.list) {
@@ -4147,10 +4170,8 @@ static void rem_slave_qps(struct mlx4_dev *dev, int slave)
                                                       MLX4_CMD_TIME_CLASS_A,
                                                       MLX4_CMD_NATIVE);
                                        if (err)
-                                               mlx4_dbg(dev, "rem_slave_qps: failed"
-                                                        " to move slave %d qpn %d to"
-                                                        " reset\n", slave,
-                                                        qp->local_qpn);
+                                               mlx4_dbg(dev, "rem_slave_qps: failed to move slave %d qpn %d to reset\n",
+                                                        slave, qp->local_qpn);
                                        atomic_dec(&qp->rcq->ref_count);
                                        atomic_dec(&qp->scq->ref_count);
                                        atomic_dec(&qp->mtt->ref_count);
@@ -4184,8 +4205,8 @@ static void rem_slave_srqs(struct mlx4_dev *dev, int slave)
 
        err = move_all_busy(dev, slave, RES_SRQ);
        if (err)
-               mlx4_warn(dev, "rem_slave_srqs: Could not move all srqs to "
-                         "busy for slave %d\n", slave);
+               mlx4_warn(dev, "rem_slave_srqs: Could not move all srqs - too busy for slave %d\n",
+                         slave);
 
        spin_lock_irq(mlx4_tlock(dev));
        list_for_each_entry_safe(srq, tmp, srq_list, com.list) {
@@ -4215,9 +4236,7 @@ static void rem_slave_srqs(struct mlx4_dev *dev, int slave)
                                                       MLX4_CMD_TIME_CLASS_A,
                                                       MLX4_CMD_NATIVE);
                                        if (err)
-                                               mlx4_dbg(dev, "rem_slave_srqs: failed"
-                                                        " to move slave %d srq %d to"
-                                                        " SW ownership\n",
+                                               mlx4_dbg(dev, "rem_slave_srqs: failed to move slave %d srq %d to SW ownership\n",
                                                         slave, srqn);
 
                                        atomic_dec(&srq->mtt->ref_count);
@@ -4252,8 +4271,8 @@ static void rem_slave_cqs(struct mlx4_dev *dev, int slave)
 
        err = move_all_busy(dev, slave, RES_CQ);
        if (err)
-               mlx4_warn(dev, "rem_slave_cqs: Could not move all cqs to "
-                         "busy for slave %d\n", slave);
+               mlx4_warn(dev, "rem_slave_cqs: Could not move all cqs - too busy for slave %d\n",
+                         slave);
 
        spin_lock_irq(mlx4_tlock(dev));
        list_for_each_entry_safe(cq, tmp, cq_list, com.list) {
@@ -4283,9 +4302,7 @@ static void rem_slave_cqs(struct mlx4_dev *dev, int slave)
                                                       MLX4_CMD_TIME_CLASS_A,
                                                       MLX4_CMD_NATIVE);
                                        if (err)
-                                               mlx4_dbg(dev, "rem_slave_cqs: failed"
-                                                        " to move slave %d cq %d to"
-                                                        " SW ownership\n",
+                                               mlx4_dbg(dev, "rem_slave_cqs: failed to move slave %d cq %d to SW ownership\n",
                                                         slave, cqn);
                                        atomic_dec(&cq->mtt->ref_count);
                                        state = RES_CQ_ALLOCATED;
@@ -4317,8 +4334,8 @@ static void rem_slave_mrs(struct mlx4_dev *dev, int slave)
 
        err = move_all_busy(dev, slave, RES_MPT);
        if (err)
-               mlx4_warn(dev, "rem_slave_mrs: Could not move all mpts to "
-                         "busy for slave %d\n", slave);
+               mlx4_warn(dev, "rem_slave_mrs: Could not move all mpts - too busy for slave %d\n",
+                         slave);
 
        spin_lock_irq(mlx4_tlock(dev));
        list_for_each_entry_safe(mpt, tmp, mpt_list, com.list) {
@@ -4353,9 +4370,7 @@ static void rem_slave_mrs(struct mlx4_dev *dev, int slave)
                                                     MLX4_CMD_TIME_CLASS_A,
                                                     MLX4_CMD_NATIVE);
                                        if (err)
-                                               mlx4_dbg(dev, "rem_slave_mrs: failed"
-                                                        " to move slave %d mpt %d to"
-                                                        " SW ownership\n",
+                                               mlx4_dbg(dev, "rem_slave_mrs: failed to move slave %d mpt %d to SW ownership\n",
                                                         slave, mptn);
                                        if (mpt->mtt)
                                                atomic_dec(&mpt->mtt->ref_count);
@@ -4387,8 +4402,8 @@ static void rem_slave_mtts(struct mlx4_dev *dev, int slave)
 
        err = move_all_busy(dev, slave, RES_MTT);
        if (err)
-               mlx4_warn(dev, "rem_slave_mtts: Could not move all mtts to "
-                         "busy for slave %d\n", slave);
+               mlx4_warn(dev, "rem_slave_mtts: Could not move all mtts  - too busy for slave %d\n",
+                         slave);
 
        spin_lock_irq(mlx4_tlock(dev));
        list_for_each_entry_safe(mtt, tmp, mtt_list, com.list) {
@@ -4490,8 +4505,8 @@ static void rem_slave_eqs(struct mlx4_dev *dev, int slave)
 
        err = move_all_busy(dev, slave, RES_EQ);
        if (err)
-               mlx4_warn(dev, "rem_slave_eqs: Could not move all eqs to "
-                         "busy for slave %d\n", slave);
+               mlx4_warn(dev, "rem_slave_eqs: Could not move all eqs - too busy for slave %d\n",
+                         slave);
 
        spin_lock_irq(mlx4_tlock(dev));
        list_for_each_entry_safe(eq, tmp, eq_list, com.list) {
@@ -4523,9 +4538,8 @@ static void rem_slave_eqs(struct mlx4_dev *dev, int slave)
                                                           MLX4_CMD_TIME_CLASS_A,
                                                           MLX4_CMD_NATIVE);
                                        if (err)
-                                               mlx4_dbg(dev, "rem_slave_eqs: failed"
-                                                        " to move slave %d eqs %d to"
-                                                        " SW ownership\n", slave, eqn);
+                                               mlx4_dbg(dev, "rem_slave_eqs: failed to move slave %d eqs %d to SW ownership\n",
+                                                        slave, eqn);
                                        mlx4_free_cmd_mailbox(dev, mailbox);
                                        atomic_dec(&eq->mtt->ref_count);
                                        state = RES_EQ_RESERVED;
@@ -4554,8 +4568,8 @@ static void rem_slave_counters(struct mlx4_dev *dev, int slave)
 
        err = move_all_busy(dev, slave, RES_COUNTER);
        if (err)
-               mlx4_warn(dev, "rem_slave_counters: Could not move all counters to "
-                         "busy for slave %d\n", slave);
+               mlx4_warn(dev, "rem_slave_counters: Could not move all counters - too busy for slave %d\n",
+                         slave);
 
        spin_lock_irq(mlx4_tlock(dev));
        list_for_each_entry_safe(counter, tmp, counter_list, com.list) {
@@ -4585,8 +4599,8 @@ static void rem_slave_xrcdns(struct mlx4_dev *dev, int slave)
 
        err = move_all_busy(dev, slave, RES_XRCD);
        if (err)
-               mlx4_warn(dev, "rem_slave_xrcdns: Could not move all xrcdns to "
-                         "busy for slave %d\n", slave);
+               mlx4_warn(dev, "rem_slave_xrcdns: Could not move all xrcdns - too busy for slave %d\n",
+                         slave);
 
        spin_lock_irq(mlx4_tlock(dev));
        list_for_each_entry_safe(xrcd, tmp, xrcdn_list, com.list) {
@@ -4731,10 +4745,8 @@ void mlx4_vf_immed_vlan_work_handler(struct work_struct *_work)
                                       0, MLX4_CMD_UPDATE_QP,
                                       MLX4_CMD_TIME_CLASS_C, MLX4_CMD_NATIVE);
                        if (err) {
-                               mlx4_info(dev, "UPDATE_QP failed for slave %d, "
-                                         "port %d, qpn %d (%d)\n",
-                                         work->slave, port, qp->local_qpn,
-                                         err);
+                               mlx4_info(dev, "UPDATE_QP failed for slave %d, port %d, qpn %d (%d)\n",
+                                         work->slave, port, qp->local_qpn, err);
                                errors++;
                        }
                }
index 405c4fbcd0ad1cb56453938012ecb9035de20f23..87d1b018a9c394309a6ee78310640690146bf245 100644 (file)
@@ -620,8 +620,8 @@ static int wait_func(struct mlx5_core_dev *dev, struct mlx5_cmd_work_ent *ent)
                               mlx5_command_str(msg_to_opcode(ent->in)),
                               msg_to_opcode(ent->in));
        }
-       mlx5_core_dbg(dev, "err %d, delivery status %s(%d)\n", err,
-                     deliv_status_to_str(ent->status), ent->status);
+       mlx5_core_dbg(dev, "err %d, delivery status %s(%d)\n",
+                     err, deliv_status_to_str(ent->status), ent->status);
 
        return err;
 }
index 64a61b286b2c959fbb67c72dcc098199d74ff68d..7f39ebcd6ad01b3dc175ffd57b3239f9f7154a8a 100644 (file)
@@ -208,7 +208,8 @@ static int mlx5_eq_int(struct mlx5_core_dev *dev, struct mlx5_eq *eq)
                 */
                rmb();
 
-               mlx5_core_dbg(eq->dev, "eqn %d, eqe type %s\n", eq->eqn, eqe_type_str(eqe->type));
+               mlx5_core_dbg(eq->dev, "eqn %d, eqe type %s\n",
+                             eq->eqn, eqe_type_str(eqe->type));
                switch (eqe->type) {
                case MLX5_EVENT_TYPE_COMP:
                        cqn = be32_to_cpu(eqe->data.comp.cqn) & 0xffffff;
@@ -270,14 +271,16 @@ static int mlx5_eq_int(struct mlx5_core_dev *dev, struct mlx5_eq *eq)
                                u16 func_id = be16_to_cpu(eqe->data.req_pages.func_id);
                                s32 npages = be32_to_cpu(eqe->data.req_pages.num_pages);
 
-                               mlx5_core_dbg(dev, "page request for func 0x%x, napges %d\n", func_id, npages);
+                               mlx5_core_dbg(dev, "page request for func 0x%x, npages %d\n",
+                                             func_id, npages);
                                mlx5_core_req_pages_handler(dev, func_id, npages);
                        }
                        break;
 
 
                default:
-                       mlx5_core_warn(dev, "Unhandled event 0x%x on EQ 0x%x\n", eqe->type, eq->eqn);
+                       mlx5_core_warn(dev, "Unhandled event 0x%x on EQ 0x%x\n",
+                                      eqe->type, eq->eqn);
                        break;
                }
 
index c3eee5f70051e0855abf623ad0b1b10614d38b82..ee24f132e319988daaae5f870466bfc3599e3783 100644 (file)
@@ -66,10 +66,10 @@ static int set_dma_caps(struct pci_dev *pdev)
 
        err = pci_set_dma_mask(pdev, DMA_BIT_MASK(64));
        if (err) {
-               dev_warn(&pdev->dev, "Warning: couldn't set 64-bit PCI DMA mask.\n");
+               dev_warn(&pdev->dev, "Warning: couldn't set 64-bit PCI DMA mask\n");
                err = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
                if (err) {
-                       dev_err(&pdev->dev, "Can't set PCI DMA mask, aborting.\n");
+                       dev_err(&pdev->dev, "Can't set PCI DMA mask, aborting\n");
                        return err;
                }
        }
@@ -77,11 +77,11 @@ static int set_dma_caps(struct pci_dev *pdev)
        err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64));
        if (err) {
                dev_warn(&pdev->dev,
-                        "Warning: couldn't set 64-bit consistent PCI DMA mask.\n");
+                        "Warning: couldn't set 64-bit consistent PCI DMA mask\n");
                err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32));
                if (err) {
                        dev_err(&pdev->dev,
-                               "Can't set consistent PCI DMA mask, aborting.\n");
+                               "Can't set consistent PCI DMA mask, aborting\n");
                        return err;
                }
        }
@@ -95,7 +95,7 @@ static int request_bar(struct pci_dev *pdev)
        int err = 0;
 
        if (!(pci_resource_flags(pdev, 0) & IORESOURCE_MEM)) {
-               dev_err(&pdev->dev, "Missing registers BAR, aborting.\n");
+               dev_err(&pdev->dev, "Missing registers BAR, aborting\n");
                return -ENODEV;
        }
 
@@ -319,13 +319,13 @@ int mlx5_dev_init(struct mlx5_core_dev *dev, struct pci_dev *pdev)
 
        err = pci_enable_device(pdev);
        if (err) {
-               dev_err(&pdev->dev, "Cannot enable PCI device, aborting.\n");
+               dev_err(&pdev->dev, "Cannot enable PCI device, aborting\n");
                goto err_dbg;
        }
 
        err = request_bar(pdev);
        if (err) {
-               dev_err(&pdev->dev, "error requesting BARs, aborting.\n");
+               dev_err(&pdev->dev, "error requesting BARs, aborting\n");
                goto err_disable;
        }
 
index 68b74e1ae1b016c9b7b9ce5866a466dd131dccac..f0c9f9a7a36142f1a7fded7a88120e1cff213aaa 100644 (file)
 
 extern int mlx5_core_debug_mask;
 
-#define mlx5_core_dbg(dev, format, arg...)                                    \
-pr_debug("%s:%s:%d:(pid %d): " format, (dev)->priv.name, __func__, __LINE__,   \
-        current->pid, ##arg)
+#define mlx5_core_dbg(dev, format, ...)                                        \
+       pr_debug("%s:%s:%d:(pid %d): " format,                          \
+                (dev)->priv.name, __func__, __LINE__, current->pid,    \
+                ##__VA_ARGS__)
 
-#define mlx5_core_dbg_mask(dev, mask, format, arg...)                         \
-do {                                                                          \
-       if ((mask) & mlx5_core_debug_mask)                                     \
-               pr_debug("%s:%s:%d:(pid %d): " format, (dev)->priv.name,       \
-                        __func__, __LINE__, current->pid, ##arg);             \
+#define mlx5_core_dbg_mask(dev, mask, format, ...)                     \
+do {                                                                   \
+       if ((mask) & mlx5_core_debug_mask)                              \
+               mlx5_core_dbg(dev, format, ##__VA_ARGS__);              \
 } while (0)
 
-#define mlx5_core_err(dev, format, arg...) \
-pr_err("%s:%s:%d:(pid %d): " format, (dev)->priv.name, __func__, __LINE__,     \
-       current->pid, ##arg)
+#define mlx5_core_err(dev, format, ...)                                        \
+       pr_err("%s:%s:%d:(pid %d): " format,                            \
+              (dev)->priv.name, __func__, __LINE__, current->pid,      \
+              ##__VA_ARGS__)
 
-#define mlx5_core_warn(dev, format, arg...) \
-pr_warn("%s:%s:%d:(pid %d): " format, (dev)->priv.name, __func__, __LINE__,    \
-       current->pid, ##arg)
+#define mlx5_core_warn(dev, format, ...)                               \
+       pr_warn("%s:%s:%d:(pid %d): " format,                           \
+               (dev)->priv.name, __func__, __LINE__, current->pid,     \
+               ##__VA_ARGS__)
 
 enum {
        MLX5_CMD_DATA, /* print command payload only */
index 4cc92764940477c4f9622fc0cbc4238a08d81283..0a11b3fe9c193a7923cb4dd1f2292460690bf337 100644 (file)
@@ -73,7 +73,7 @@ int mlx5_core_create_mkey(struct mlx5_core_dev *dev, struct mlx5_core_mr *mr,
        }
 
        if (err) {
-               mlx5_core_dbg(dev, "cmd exec faile %d\n", err);
+               mlx5_core_dbg(dev, "cmd exec failed %d\n", err);
                return err;
        }
 
@@ -191,7 +191,8 @@ int mlx5_core_create_psv(struct mlx5_core_dev *dev, u32 pdn,
        }
 
        if (out.hdr.status) {
-               mlx5_core_err(dev, "create_psv bad status %d\n", out.hdr.status);
+               mlx5_core_err(dev, "create_psv bad status %d\n",
+                             out.hdr.status);
                return mlx5_cmd_status_to_err(&out.hdr);
        }
 
@@ -220,7 +221,8 @@ int mlx5_core_destroy_psv(struct mlx5_core_dev *dev, int psv_num)
        }
 
        if (out.hdr.status) {
-               mlx5_core_err(dev, "destroy_psv bad status %d\n", out.hdr.status);
+               mlx5_core_err(dev, "destroy_psv bad status %d\n",
+                             out.hdr.status);
                err = mlx5_cmd_status_to_err(&out.hdr);
                goto out;
        }
index d59790a82bc3d5c9f3def1e4a664a6531b93ad7d..c2a953ef0e675801827ac9bec57e1aaf399dbcca 100644 (file)
@@ -311,7 +311,8 @@ retry:
        in->num_entries = cpu_to_be32(npages);
        err = mlx5_cmd_exec(dev, in, inlen, &out, sizeof(out));
        if (err) {
-               mlx5_core_warn(dev, "func_id 0x%x, npages %d, err %d\n", func_id, npages, err);
+               mlx5_core_warn(dev, "func_id 0x%x, npages %d, err %d\n",
+                              func_id, npages, err);
                goto out_alloc;
        }
        dev->priv.fw_pages += npages;
@@ -319,7 +320,8 @@ retry:
        if (out.hdr.status) {
                err = mlx5_cmd_status_to_err(&out.hdr);
                if (err) {
-                       mlx5_core_warn(dev, "func_id 0x%x, npages %d, status %d\n", func_id, npages, out.hdr.status);
+                       mlx5_core_warn(dev, "func_id 0x%x, npages %d, status %d\n",
+                                      func_id, npages, out.hdr.status);
                        goto out_alloc;
                }
        }
@@ -378,7 +380,7 @@ static int reclaim_pages(struct mlx5_core_dev *dev, u32 func_id, int npages,
        mlx5_core_dbg(dev, "npages %d, outlen %d\n", npages, outlen);
        err = mlx5_cmd_exec(dev, &in, sizeof(in), out, outlen);
        if (err) {
-               mlx5_core_err(dev, "failed recliaming pages\n");
+               mlx5_core_err(dev, "failed reclaiming pages\n");
                goto out_free;
        }
        dev->priv.fw_pages -= npages;
@@ -414,8 +416,8 @@ static void pages_work_handler(struct work_struct *work)
                err = give_pages(dev, req->func_id, req->npages, 1);
 
        if (err)
-               mlx5_core_warn(dev, "%s fail %d\n", req->npages < 0 ?
-                              "reclaim" : "give", err);
+               mlx5_core_warn(dev, "%s fail %d\n",
+                              req->npages < 0 ? "reclaim" : "give", err);
 
        kfree(req);
 }
@@ -487,7 +489,8 @@ int mlx5_reclaim_startup_pages(struct mlx5_core_dev *dev)
                                            optimal_reclaimed_pages(),
                                            &nclaimed);
                        if (err) {
-                               mlx5_core_warn(dev, "failed reclaiming pages (%d)\n", err);
+                               mlx5_core_warn(dev, "failed reclaiming pages (%d)\n",
+                                              err);
                                return err;
                        }
                        if (nclaimed)
index 510576213dd0c8e823a6feecb87d427f275ca90e..8145b4668229d6a483e9b36fdc457387a6c6ab41 100644 (file)
@@ -79,7 +79,7 @@ int mlx5_core_create_qp(struct mlx5_core_dev *dev,
 
        err = mlx5_cmd_exec(dev, in, inlen, &out, sizeof(out));
        if (err) {
-               mlx5_core_warn(dev, "ret %d", err);
+               mlx5_core_warn(dev, "ret %d\n", err);
                return err;
        }
 
@@ -96,7 +96,7 @@ int mlx5_core_create_qp(struct mlx5_core_dev *dev,
        err = radix_tree_insert(&table->tree, qp->qpn, qp);
        spin_unlock_irq(&table->lock);
        if (err) {
-               mlx5_core_warn(dev, "err %d", err);
+               mlx5_core_warn(dev, "err %d\n", err);
                goto err_cmd;
        }
 
index 16435b3cfa9f133a3fe937dc96670853873e7485..6c7c78baedcaf590f6f6c78f5155a1df2fa594e9 100644 (file)
@@ -1504,15 +1504,15 @@ ks8695_probe(struct platform_device *pdev)
        if (ksp->phyiface_regs && ksp->link_irq == -1) {
                ks8695_init_switch(ksp);
                ksp->dtype = KS8695_DTYPE_LAN;
-               SET_ETHTOOL_OPS(ndev, &ks8695_ethtool_ops);
+               ndev->ethtool_ops = &ks8695_ethtool_ops;
        } else if (ksp->phyiface_regs && ksp->link_irq != -1) {
                ks8695_init_wan_phy(ksp);
                ksp->dtype = KS8695_DTYPE_WAN;
-               SET_ETHTOOL_OPS(ndev, &ks8695_wan_ethtool_ops);
+               ndev->ethtool_ops = &ks8695_wan_ethtool_ops;
        } else {
                /* No initialisation since HPNA does not have a PHY */
                ksp->dtype = KS8695_DTYPE_HPNA;
-               SET_ETHTOOL_OPS(ndev, &ks8695_ethtool_ops);
+               ndev->ethtool_ops = &ks8695_ethtool_ops;
        }
 
        /* And bring up the net_device with the net core */
index e0c92e0e5e1d463f0242088d184394608b243cb6..13767eb36a48eee204a5a1e1f5d5b8c963bb01b1 100644 (file)
@@ -1471,7 +1471,7 @@ static int ks8851_probe(struct spi_device *spi)
 
        skb_queue_head_init(&ks->txq);
 
-       SET_ETHTOOL_OPS(ndev, &ks8851_ethtool_ops);
+       ndev->ethtool_ops = &ks8851_ethtool_ops;
        SET_NETDEV_DEV(ndev, &spi->dev);
 
        spi_set_drvdata(spi, ks);
index 14ac0e2bc09fcbc50f65ceead949ecd7d15d6130..064a48d0c368a267826e2f77bacb2da9fa366e1e 100644 (file)
@@ -4930,7 +4930,7 @@ static void netdev_tx_timeout(struct net_device *dev)
                 * Only reset the hardware if time between calls is long
                 * enough.
                 */
-               if (jiffies - last_reset <= dev->watchdog_timeo)
+               if (time_before_eq(jiffies, last_reset + dev->watchdog_timeo))
                        hw_priv = NULL;
        }
 
@@ -7072,6 +7072,7 @@ static int pcidev_init(struct pci_dev *pdev, const struct pci_device_id *id)
                dev = alloc_etherdev(sizeof(struct dev_priv));
                if (!dev)
                        goto pcidev_init_reg_err;
+               SET_NETDEV_DEV(dev, &pdev->dev);
                info->netdev[i] = dev;
 
                priv = netdev_priv(dev);
@@ -7106,7 +7107,7 @@ static int pcidev_init(struct pci_dev *pdev, const struct pci_device_id *id)
                }
 
                dev->netdev_ops = &netdev_ops;
-               SET_ETHTOOL_OPS(dev, &netdev_ethtool_ops);
+               dev->ethtool_ops = &netdev_ethtool_ops;
                if (register_netdev(dev))
                        goto pcidev_init_reg_err;
                port_set_power_saving(port, true);
index c7b40aa21f22fe2a909754fe4e2a9a7b8bad5898..b1b5f66b8b6910ad2dc38c2157d8b12aba1668c1 100644 (file)
@@ -1593,7 +1593,7 @@ static int enc28j60_probe(struct spi_device *spi)
        dev->irq = spi->irq;
        dev->netdev_ops = &enc28j60_netdev_ops;
        dev->watchdog_timeo = TX_TIMEOUT;
-       SET_ETHTOOL_OPS(dev, &enc28j60_ethtool_ops);
+       dev->ethtool_ops = &enc28j60_ethtool_ops;
 
        enc28j60_lowpower(priv, true);
 
index 130f6b204efa29cb9c97c98b4e3b0f52b569cd35..f3d5d79f1cd15de8dff66fa4aeab6fccaa25ab8e 100644 (file)
@@ -4112,7 +4112,7 @@ static int myri10ge_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
        setup_timer(&mgp->watchdog_timer, myri10ge_watchdog_timer,
                    (unsigned long)mgp);
 
-       SET_ETHTOOL_OPS(netdev, &myri10ge_ethtool_ops);
+       netdev->ethtool_ops = &myri10ge_ethtool_ops;
        INIT_WORK(&mgp->watchdog_work, myri10ge_watchdog);
        status = register_netdev(netdev);
        if (status != 0) {
index 64ec2a437f46a3280e9377c55d8226a19f82d089..291fba8b9f07351effff8ebbc9b229fc40c0e885 100644 (file)
@@ -927,7 +927,7 @@ static int natsemi_probe1(struct pci_dev *pdev, const struct pci_device_id *ent)
        dev->netdev_ops = &natsemi_netdev_ops;
        dev->watchdog_timeo = TX_TIMEOUT;
 
-       SET_ETHTOOL_OPS(dev, &ethtool_ops);
+       dev->ethtool_ops = &ethtool_ops;
 
        if (mtu)
                dev->mtu = mtu;
index dbccf1de49ecbf7011a167585caefdeb082fa4de..19bb8244b9e3e1056a2835bf2c2434f1e6ae65a3 100644 (file)
@@ -2030,7 +2030,7 @@ static int ns83820_init_one(struct pci_dev *pci_dev,
                pci_dev->subsystem_vendor, pci_dev->subsystem_device);
 
        ndev->netdev_ops = &netdev_ops;
-       SET_ETHTOOL_OPS(ndev, &ops);
+       ndev->ethtool_ops = &ops;
        ndev->watchdog_timeo = 5 * HZ;
        pci_set_drvdata(pci_dev, ndev);
 
index a2844ff322c4c62bed8957a7f3797ad321359cbf..e3cf38e6ce3c11deb08676d0ee58ec4bf62412c7 100644 (file)
@@ -534,15 +534,6 @@ static inline void s2io_start_all_tx_queue(struct s2io_nic *sp)
        netif_tx_start_all_queues(sp->dev);
 }
 
-static inline void s2io_start_tx_queue(struct s2io_nic *sp, int fifo_no)
-{
-       if (!sp->config.multiq)
-               sp->mac_control.fifos[fifo_no].queue_state =
-                       FIFO_QUEUE_START;
-
-       netif_tx_start_all_queues(sp->dev);
-}
-
 static inline void s2io_wake_all_tx_queue(struct s2io_nic *sp)
 {
        if (!sp->config.multiq) {
@@ -7919,7 +7910,7 @@ s2io_init_nic(struct pci_dev *pdev, const struct pci_device_id *pre)
 
        /*  Driver entry points */
        dev->netdev_ops = &s2io_netdev_ops;
-       SET_ETHTOOL_OPS(dev, &netdev_ethtool_ops);
+       dev->ethtool_ops = &netdev_ethtool_ops;
        dev->hw_features = NETIF_F_SG | NETIF_F_IP_CSUM |
                NETIF_F_TSO | NETIF_F_TSO6 |
                NETIF_F_RXCSUM | NETIF_F_LRO;
index f8f073880f84bccd5f0daedb8c1f08233c43e6c8..ddcc81ad1ae1f3c2167f03859a436a8fc3938dc9 100644 (file)
@@ -1128,5 +1128,5 @@ static const struct ethtool_ops vxge_ethtool_ops = {
 
 void vxge_initialize_ethtool_ops(struct net_device *ndev)
 {
-       SET_ETHTOOL_OPS(ndev, &vxge_ethtool_ops);
+       ndev->ethtool_ops = &vxge_ethtool_ops;
 }
index d107bcbb8543035110a98a82a21a7e72c8ec1303..7a0deadd53bf14743e4c530b895c8658e59c8be2 100644 (file)
@@ -2122,7 +2122,7 @@ static int vxge_open_vpaths(struct vxgedev *vdev)
 static void adaptive_coalesce_tx_interrupts(struct vxge_fifo *fifo)
 {
        fifo->interrupt_count++;
-       if (jiffies > fifo->jiffies + HZ / 100) {
+       if (time_before(fifo->jiffies + HZ / 100, jiffies)) {
                struct __vxge_hw_fifo *hw_fifo = fifo->handle;
 
                fifo->jiffies = jiffies;
@@ -2150,7 +2150,7 @@ static void adaptive_coalesce_tx_interrupts(struct vxge_fifo *fifo)
 static void adaptive_coalesce_rx_interrupts(struct vxge_ring *ring)
 {
        ring->interrupt_count++;
-       if (jiffies > ring->jiffies + HZ / 100) {
+       if (time_before(ring->jiffies + HZ / 100, jiffies)) {
                struct __vxge_hw_ring *hw_ring = ring->handle;
 
                ring->jiffies = jiffies;
index fddb464aeab3a517c362d12ad4891eb3e2529cae..e8235c5c5e696cc17226a0878e376753967e837e 100644 (file)
@@ -5766,7 +5766,7 @@ static int nv_probe(struct pci_dev *pci_dev, const struct pci_device_id *id)
                dev->netdev_ops = &nv_netdev_ops_optimized;
 
        netif_napi_add(dev, &np->napi, nv_napi_poll, RX_WORK_PER_LOOP);
-       SET_ETHTOOL_OPS(dev, &ops);
+       dev->ethtool_ops = &ops;
        dev->watchdog_timeo = NV_WATCHDOG_TIMEO;
 
        pci_set_drvdata(pci_dev, dev);
index a588ffde970041def37cae92b215011d88b6eea6..44c8be1c68051ec9a9b79f4ba481060022613e79 100644 (file)
@@ -4,7 +4,7 @@
 
 config PCH_GBE
        tristate "OKI SEMICONDUCTOR IOH(ML7223/ML7831) GbE"
-       depends on PCI && (X86 || COMPILE_TEST)
+       depends on PCI && (X86_32 || COMPILE_TEST)
        select MII
        select PTP_1588_CLOCK_PCH
        ---help---
index 826f0ccdc23c818139d3951c953b56a3b885d6e8..114d2fe52cc2d900bc469825d28b3e3d380ceeed 100644 (file)
@@ -508,5 +508,5 @@ static const struct ethtool_ops pch_gbe_ethtool_ops = {
 
 void pch_gbe_set_ethtool_ops(struct net_device *netdev)
 {
-       SET_ETHTOOL_OPS(netdev, &pch_gbe_ethtool_ops);
+       netdev->ethtool_ops = &pch_gbe_ethtool_ops;
 }
index b6bdeb3c19711ac960646dfdbeef2207b86f366d..9a997e4c3e084a16368ebcae6ed53d42a0c0524c 100644 (file)
@@ -724,10 +724,8 @@ static int hamachi_init_one(struct pci_dev *pdev,
 
        /* The Hamachi-specific entries in the device structure. */
        dev->netdev_ops = &hamachi_netdev_ops;
-       if (chip_tbl[hmp->chip_id].flags & CanHaveMII)
-               SET_ETHTOOL_OPS(dev, &ethtool_ops);
-       else
-               SET_ETHTOOL_OPS(dev, &ethtool_ops_no_mii);
+       dev->ethtool_ops = (chip_tbl[hmp->chip_id].flags & CanHaveMII) ?
+               &ethtool_ops : &ethtool_ops_no_mii;
        dev->watchdog_timeo = TX_TIMEOUT;
        if (mtu)
                dev->mtu = mtu;
index 9a6cb482dcd0b8bf5e26e3e2c784f57b032f5236..69a8dc0950720b7f57a83483d1cf86f0f4f9f35d 100644 (file)
@@ -472,7 +472,7 @@ static int yellowfin_init_one(struct pci_dev *pdev,
 
        /* The Yellowfin-specific entries in the device structure. */
        dev->netdev_ops = &netdev_ops;
-       SET_ETHTOOL_OPS(dev, &ethtool_ops);
+       dev->ethtool_ops = &ethtool_ops;
        dev->watchdog_timeo = TX_TIMEOUT;
 
        if (mtu)
index c14bd3116e454edad88d27f7ab8923cb999ec885..d49cba1290814ecc175d925fca6ae2dd9227cded 100644 (file)
@@ -66,6 +66,17 @@ config QLCNIC_VXLAN
          Say Y here if you want to enable hardware offload support for
          Virtual eXtensible Local Area Network (VXLAN) in the driver.
 
+config QLCNIC_HWMON
+       bool "QLOGIC QLCNIC 82XX and 83XX family HWMON support"
+       depends on QLCNIC && HWMON && !(QLCNIC=y && HWMON=m)
+       default y
+       ---help---
+         This configuration parameter can be used to read the
+         board temperature in Converged Ethernet devices
+         supported by qlcnic.
+
+         This data is available via the hwmon sysfs interface.
+
 config QLGE
        tristate "QLogic QLGE 10Gb Ethernet Driver Support"
        depends on PCI
index f09c35d669b3ec7d8898f96f0ffd9ce7362ddb13..5bf05818a12cfa707d138c420908546daaa502a5 100644 (file)
@@ -1373,7 +1373,7 @@ netxen_setup_netdev(struct netxen_adapter *adapter,
 
        netxen_nic_change_mtu(netdev, netdev->mtu);
 
-       SET_ETHTOOL_OPS(netdev, &netxen_nic_ethtool_ops);
+       netdev->ethtool_ops = &netxen_nic_ethtool_ops;
 
        netdev->hw_features = NETIF_F_SG | NETIF_F_IP_CSUM | NETIF_F_TSO |
                              NETIF_F_RXCSUM;
index 2eabd44f8914de91ac4eaebbed17d4a08c66c96f..b5d6bc1a8b0024770c919e6ccf1f49a0cc6da1a8 100644 (file)
@@ -3838,7 +3838,7 @@ static int ql3xxx_probe(struct pci_dev *pdev,
 
        /* Set driver entry points */
        ndev->netdev_ops = &ql3xxx_netdev_ops;
-       SET_ETHTOOL_OPS(ndev, &ql3xxx_ethtool_ops);
+       ndev->ethtool_ops = &ql3xxx_ethtool_ops;
        ndev->watchdog_timeo = 5 * HZ;
 
        netif_napi_add(ndev, &qdev->napi, ql_poll, 64);
index 7b52a88923ef2e53fadf9aca0185e2af492aa7be..6e7527e2b595ffa5082d58eead178e8b2dc94fa8 100644 (file)
@@ -39,8 +39,8 @@
 
 #define _QLCNIC_LINUX_MAJOR 5
 #define _QLCNIC_LINUX_MINOR 3
-#define _QLCNIC_LINUX_SUBVERSION 57
-#define QLCNIC_LINUX_VERSIONID  "5.3.57"
+#define _QLCNIC_LINUX_SUBVERSION 59
+#define QLCNIC_LINUX_VERSIONID  "5.3.59"
 #define QLCNIC_DRV_IDC_VER  0x01
 #define QLCNIC_DRIVER_VERSION  ((_QLCNIC_LINUX_MAJOR << 16) |\
                 (_QLCNIC_LINUX_MINOR << 8) | (_QLCNIC_LINUX_SUBVERSION))
@@ -537,6 +537,7 @@ struct qlcnic_hardware_context {
        u8 phys_port_id[ETH_ALEN];
        u8 lb_mode;
        u16 vxlan_port;
+       struct device *hwmon_dev;
 };
 
 struct qlcnic_adapter_stats {
@@ -1018,6 +1019,8 @@ struct qlcnic_ipaddr {
 #define QLCNIC_DEL_VXLAN_PORT          0x200000
 #endif
 
+#define QLCNIC_VLAN_FILTERING          0x800000
+
 #define QLCNIC_IS_MSI_FAMILY(adapter) \
        ((adapter)->flags & (QLCNIC_MSI_ENABLED | QLCNIC_MSIX_ENABLED))
 #define QLCNIC_IS_TSO_CAPABLE(adapter)  \
@@ -1692,7 +1695,7 @@ int qlcnic_read_mac_addr(struct qlcnic_adapter *);
 int qlcnic_setup_netdev(struct qlcnic_adapter *, struct net_device *, int);
 void qlcnic_set_netdev_features(struct qlcnic_adapter *,
                                struct qlcnic_esw_func_cfg *);
-void qlcnic_sriov_vf_schedule_multi(struct net_device *);
+void qlcnic_sriov_vf_set_multi(struct net_device *);
 int qlcnic_is_valid_nic_func(struct qlcnic_adapter *, u8);
 int qlcnic_get_pci_func_type(struct qlcnic_adapter *, u16, u16 *, u16 *,
                             u16 *);
@@ -1719,22 +1722,6 @@ static inline u32 qlcnic_tx_avail(struct qlcnic_host_tx_ring *tx_ring)
                                tx_ring->producer;
 }
 
-static inline int qlcnic_set_real_num_queues(struct qlcnic_adapter *adapter,
-                                            struct net_device *netdev)
-{
-       int err;
-
-       netdev->num_tx_queues = adapter->drv_tx_rings;
-       netdev->real_num_tx_queues = adapter->drv_tx_rings;
-
-       err = netif_set_real_num_tx_queues(netdev, adapter->drv_tx_rings);
-       if (err)
-               netdev_err(netdev, "failed to set %d Tx queues\n",
-                          adapter->drv_tx_rings);
-
-       return err;
-}
-
 struct qlcnic_nic_template {
        int (*config_bridged_mode) (struct qlcnic_adapter *, u32);
        int (*config_led) (struct qlcnic_adapter *, u32, u32);
@@ -2354,6 +2341,16 @@ static inline bool qlcnic_83xx_vf_check(struct qlcnic_adapter *adapter)
        return (device == PCI_DEVICE_ID_QLOGIC_VF_QLE834X) ? true : false;
 }
 
+static inline bool qlcnic_sriov_check(struct qlcnic_adapter *adapter)
+{
+       bool status;
+
+       status = (qlcnic_sriov_pf_check(adapter) ||
+                 qlcnic_sriov_vf_check(adapter)) ? true : false;
+
+       return status;
+}
+
 static inline u32 qlcnic_get_vnic_func_count(struct qlcnic_adapter *adapter)
 {
        if (qlcnic_84xx_check(adapter))
@@ -2361,4 +2358,18 @@ static inline u32 qlcnic_get_vnic_func_count(struct qlcnic_adapter *adapter)
        else
                return QLC_DEFAULT_VNIC_COUNT;
 }
+
+#ifdef CONFIG_QLCNIC_HWMON
+void qlcnic_register_hwmon_dev(struct qlcnic_adapter *);
+void qlcnic_unregister_hwmon_dev(struct qlcnic_adapter *);
+#else
+static inline void qlcnic_register_hwmon_dev(struct qlcnic_adapter *adapter)
+{
+       return;
+}
+static inline void qlcnic_unregister_hwmon_dev(struct qlcnic_adapter *adapter)
+{
+       return;
+}
+#endif
 #endif                         /* __QLCNIC_H_ */
index b7cffb46a75dbd8f215752218a6f1f4239c1cc98..a4a4ec0b68f8d5e9d7b0c6f3ed5050b5787a37c4 100644 (file)
@@ -33,6 +33,7 @@ static void qlcnic_83xx_get_beacon_state(struct qlcnic_adapter *);
 #define RSS_HASHTYPE_IP_TCP            0x3
 #define QLC_83XX_FW_MBX_CMD            0
 #define QLC_SKIP_INACTIVE_PCI_REGS     7
+#define QLC_MAX_LEGACY_FUNC_SUPP       8
 
 static const struct qlcnic_mailbox_metadata qlcnic_83xx_mbx_tbl[] = {
        {QLCNIC_CMD_CONFIGURE_IP_ADDR, 6, 1},
@@ -357,8 +358,15 @@ int qlcnic_83xx_setup_intr(struct qlcnic_adapter *adapter)
        if (!ahw->intr_tbl)
                return -ENOMEM;
 
-       if (!(adapter->flags & QLCNIC_MSIX_ENABLED))
+       if (!(adapter->flags & QLCNIC_MSIX_ENABLED)) {
+               if (adapter->ahw->pci_func >= QLC_MAX_LEGACY_FUNC_SUPP) {
+                       dev_err(&adapter->pdev->dev, "PCI function number 8 and higher are not supported with legacy interrupt, func 0x%x\n",
+                               ahw->pci_func);
+                       return -EOPNOTSUPP;
+               }
+
                qlcnic_83xx_enable_legacy(adapter);
+       }
 
        for (i = 0; i < num_msix; i++) {
                if (adapter->flags & QLCNIC_MSIX_ENABLED)
@@ -879,6 +887,9 @@ int qlcnic_83xx_alloc_mbx_args(struct qlcnic_cmd_args *mbx,
                        return 0;
                }
        }
+
+       dev_err(&adapter->pdev->dev, "%s: Invalid mailbox command opcode 0x%x\n",
+               __func__, type);
        return -EINVAL;
 }
 
@@ -3026,19 +3037,18 @@ void qlcnic_83xx_unlock_driver(struct qlcnic_adapter *adapter)
        QLCRDX(adapter->ahw, QLC_83XX_DRV_UNLOCK);
 }
 
-int qlcnic_83xx_ms_mem_write128(struct qlcnic_adapter *adapter, u64 addr,
+int qlcnic_ms_mem_write128(struct qlcnic_adapter *adapter, u64 addr,
                                u32 *data, u32 count)
 {
        int i, j, ret = 0;
        u32 temp;
-       int err = 0;
 
        /* Check alignment */
        if (addr & 0xF)
                return -EIO;
 
        mutex_lock(&adapter->ahw->mem_lock);
-       qlcnic_83xx_wrt_reg_indirect(adapter, QLCNIC_MS_ADDR_HI, 0);
+       qlcnic_ind_wr(adapter, QLCNIC_MS_ADDR_HI, 0);
 
        for (i = 0; i < count; i++, addr += 16) {
                if (!((ADDR_IN_RANGE(addr, QLCNIC_ADDR_QDR_NET,
@@ -3049,26 +3059,16 @@ int qlcnic_83xx_ms_mem_write128(struct qlcnic_adapter *adapter, u64 addr,
                        return -EIO;
                }
 
-               qlcnic_83xx_wrt_reg_indirect(adapter, QLCNIC_MS_ADDR_LO, addr);
-               qlcnic_83xx_wrt_reg_indirect(adapter, QLCNIC_MS_WRTDATA_LO,
-                                            *data++);
-               qlcnic_83xx_wrt_reg_indirect(adapter, QLCNIC_MS_WRTDATA_HI,
-                                            *data++);
-               qlcnic_83xx_wrt_reg_indirect(adapter, QLCNIC_MS_WRTDATA_ULO,
-                                            *data++);
-               qlcnic_83xx_wrt_reg_indirect(adapter, QLCNIC_MS_WRTDATA_UHI,
-                                            *data++);
-               qlcnic_83xx_wrt_reg_indirect(adapter, QLCNIC_MS_CTRL,
-                                            QLCNIC_TA_WRITE_ENABLE);
-               qlcnic_83xx_wrt_reg_indirect(adapter, QLCNIC_MS_CTRL,
-                                            QLCNIC_TA_WRITE_START);
+               qlcnic_ind_wr(adapter, QLCNIC_MS_ADDR_LO, addr);
+               qlcnic_ind_wr(adapter, QLCNIC_MS_WRTDATA_LO, *data++);
+               qlcnic_ind_wr(adapter, QLCNIC_MS_WRTDATA_HI, *data++);
+               qlcnic_ind_wr(adapter, QLCNIC_MS_WRTDATA_ULO, *data++);
+               qlcnic_ind_wr(adapter, QLCNIC_MS_WRTDATA_UHI, *data++);
+               qlcnic_ind_wr(adapter, QLCNIC_MS_CTRL, QLCNIC_TA_WRITE_ENABLE);
+               qlcnic_ind_wr(adapter, QLCNIC_MS_CTRL, QLCNIC_TA_WRITE_START);
 
                for (j = 0; j < MAX_CTL_CHECK; j++) {
-                       temp = QLCRD32(adapter, QLCNIC_MS_CTRL, &err);
-                       if (err == -EIO) {
-                               mutex_unlock(&adapter->ahw->mem_lock);
-                               return err;
-                       }
+                       temp = qlcnic_ind_rd(adapter, QLCNIC_MS_CTRL);
 
                        if ((temp & TA_CTL_BUSY) == 0)
                                break;
index 88d809c356334675026fb1a71e37107ded60c709..97784d09933f017f20a92510f3db969c0cfd746a 100644 (file)
@@ -560,7 +560,7 @@ void qlcnic_83xx_napi_del(struct qlcnic_adapter *);
 void qlcnic_83xx_napi_enable(struct qlcnic_adapter *);
 void qlcnic_83xx_napi_disable(struct qlcnic_adapter *);
 int qlcnic_83xx_config_led(struct qlcnic_adapter *, u32, u32);
-void qlcnic_ind_wr(struct qlcnic_adapter *, u32, u32);
+int qlcnic_ind_wr(struct qlcnic_adapter *, u32, u32);
 int qlcnic_ind_rd(struct qlcnic_adapter *, u32);
 int qlcnic_83xx_create_rx_ctx(struct qlcnic_adapter *);
 int qlcnic_83xx_create_tx_ctx(struct qlcnic_adapter *,
@@ -617,7 +617,6 @@ void qlcnic_83xx_idc_request_reset(struct qlcnic_adapter *, u32);
 int qlcnic_83xx_lock_driver(struct qlcnic_adapter *);
 void qlcnic_83xx_unlock_driver(struct qlcnic_adapter *);
 int qlcnic_83xx_set_default_offload_settings(struct qlcnic_adapter *);
-int qlcnic_83xx_ms_mem_write128(struct qlcnic_adapter *, u64, u32 *, u32);
 int qlcnic_83xx_idc_vnic_pf_entry(struct qlcnic_adapter *);
 int qlcnic_83xx_disable_vnic_mode(struct qlcnic_adapter *, int);
 int qlcnic_83xx_config_vnic_opmode(struct qlcnic_adapter *);
@@ -659,4 +658,5 @@ void qlcnic_83xx_cache_tmpl_hdr_values(struct qlcnic_fw_dump *);
 u32 qlcnic_83xx_get_cap_size(void *, int);
 void qlcnic_83xx_set_sys_info(void *, int, u32);
 void qlcnic_83xx_store_cap_mask(void *, u32);
+int qlcnic_ms_mem_write128(struct qlcnic_adapter *, u64, u32 *, u32);
 #endif
index ba20c721ee97f59d05f18a126471cb4a4a277f0b..f33559b725283cf69b08e80a8179fb488b89acb2 100644 (file)
@@ -1363,8 +1363,8 @@ static int qlcnic_83xx_copy_bootloader(struct qlcnic_adapter *adapter)
                return ret;
        }
        /* 16 byte write to MS memory */
-       ret = qlcnic_83xx_ms_mem_write128(adapter, dest, (u32 *)p_cache,
-                                         size / 16);
+       ret = qlcnic_ms_mem_write128(adapter, dest, (u32 *)p_cache,
+                                    size / 16);
        if (ret) {
                vfree(p_cache);
                return ret;
@@ -1389,8 +1389,8 @@ static int qlcnic_83xx_copy_fw_file(struct qlcnic_adapter *adapter)
        p_cache = (u32 *)fw->data;
        addr = (u64)dest;
 
-       ret = qlcnic_83xx_ms_mem_write128(adapter, addr,
-                                         p_cache, size / 16);
+       ret = qlcnic_ms_mem_write128(adapter, addr,
+                                    p_cache, size / 16);
        if (ret) {
                dev_err(&adapter->pdev->dev, "MS memory write failed\n");
                release_firmware(fw);
@@ -1405,8 +1405,8 @@ static int qlcnic_83xx_copy_fw_file(struct qlcnic_adapter *adapter)
                        data[i] = fw->data[size + i];
                for (; i < 16; i++)
                        data[i] = 0;
-               ret = qlcnic_83xx_ms_mem_write128(adapter, addr,
-                                                 (u32 *)data, 1);
+               ret = qlcnic_ms_mem_write128(adapter, addr,
+                                            (u32 *)data, 1);
                if (ret) {
                        dev_err(&adapter->pdev->dev,
                                "MS memory write failed\n");
@@ -2181,6 +2181,8 @@ int qlcnic_83xx_configure_opmode(struct qlcnic_adapter *adapter)
                max_sds_rings = QLCNIC_MAX_SDS_RINGS;
                max_tx_rings = QLCNIC_MAX_TX_RINGS;
        } else {
+               dev_err(&adapter->pdev->dev, "%s: Invalid opmode %d\n",
+                       __func__, ret);
                return -EIO;
        }
 
index c1e11f5715b056c0e90ba096de8f397eb50fce97..304e247bdf339c59b30c816839da1bb0ccb9a6ac 100644 (file)
@@ -1027,8 +1027,11 @@ int qlcnic_config_port_mirroring(struct qlcnic_adapter *adapter, u8 id,
        u32 arg1;
 
        if (adapter->ahw->op_mode != QLCNIC_MGMT_FUNC ||
-           !(adapter->eswitch[id].flags & QLCNIC_SWITCH_ENABLE))
+           !(adapter->eswitch[id].flags & QLCNIC_SWITCH_ENABLE)) {
+               dev_err(&adapter->pdev->dev, "%s: Not a management function\n",
+                       __func__);
                return err;
+       }
 
        arg1 = id | (enable_mirroring ? BIT_4 : 0);
        arg1 |= pci_func << 8;
@@ -1318,8 +1321,12 @@ int qlcnic_config_switch_port(struct qlcnic_adapter *adapter,
        u32 arg1, arg2 = 0;
        u8 pci_func;
 
-       if (adapter->ahw->op_mode != QLCNIC_MGMT_FUNC)
+       if (adapter->ahw->op_mode != QLCNIC_MGMT_FUNC) {
+               dev_err(&adapter->pdev->dev, "%s: Not a management function\n",
+                       __func__);
                return err;
+       }
+
        pci_func = esw_cfg->pci_func;
        index = qlcnic_is_valid_nic_func(adapter, pci_func);
        if (index < 0)
@@ -1363,6 +1370,8 @@ int qlcnic_config_switch_port(struct qlcnic_adapter *adapter,
                        arg1 &= ~(0x0ffff << 16);
                        break;
        default:
+               dev_err(&adapter->pdev->dev, "%s: Invalid opmode 0x%x\n",
+                       __func__, esw_cfg->op_mode);
                return err;
        }
 
index 9f3adf4e70b5f31a2d143c8d946ce6d88201a096..851cb4a80d50a6d4b2733ac2693fd799cca37885 100644 (file)
@@ -373,12 +373,16 @@ int qlcnic_ind_rd(struct qlcnic_adapter *adapter, u32 addr)
        return data;
 }
 
-void qlcnic_ind_wr(struct qlcnic_adapter *adapter, u32 addr, u32 data)
+int qlcnic_ind_wr(struct qlcnic_adapter *adapter, u32 addr, u32 data)
 {
+       int ret = 0;
+
        if (qlcnic_82xx_check(adapter))
                qlcnic_write_window_reg(addr, adapter->ahw->pci_base0, data);
        else
-               qlcnic_83xx_wrt_reg_indirect(adapter, addr, data);
+               ret = qlcnic_83xx_wrt_reg_indirect(adapter, addr, data);
+
+       return ret;
 }
 
 static int
@@ -567,28 +571,14 @@ static void __qlcnic_set_multi(struct net_device *netdev, u16 vlan)
 void qlcnic_set_multi(struct net_device *netdev)
 {
        struct qlcnic_adapter *adapter = netdev_priv(netdev);
-       struct qlcnic_mac_vlan_list *cur;
-       struct netdev_hw_addr *ha;
-       size_t temp;
 
        if (!test_bit(__QLCNIC_FW_ATTACHED, &adapter->state))
                return;
-       if (qlcnic_sriov_vf_check(adapter)) {
-               if (!netdev_mc_empty(netdev)) {
-                       netdev_for_each_mc_addr(ha, netdev) {
-                               temp = sizeof(struct qlcnic_mac_vlan_list);
-                               cur = kzalloc(temp, GFP_ATOMIC);
-                               if (cur == NULL)
-                                       break;
-                               memcpy(cur->mac_addr,
-                                      ha->addr, ETH_ALEN);
-                               list_add_tail(&cur->list, &adapter->vf_mc_list);
-                       }
-               }
-               qlcnic_sriov_vf_schedule_multi(adapter->netdev);
-               return;
-       }
-       __qlcnic_set_multi(netdev, 0);
+
+       if (qlcnic_sriov_vf_check(adapter))
+               qlcnic_sriov_vf_set_multi(netdev);
+       else
+               __qlcnic_set_multi(netdev, 0);
 }
 
 int qlcnic_82xx_nic_set_promisc(struct qlcnic_adapter *adapter, u32 mode)
@@ -630,7 +620,7 @@ void qlcnic_prune_lb_filters(struct qlcnic_adapter *adapter)
        struct hlist_node *n;
        struct hlist_head *head;
        int i;
-       unsigned long time;
+       unsigned long expires;
        u8 cmd;
 
        for (i = 0; i < adapter->fhash.fbucket_size; i++) {
@@ -638,8 +628,8 @@ void qlcnic_prune_lb_filters(struct qlcnic_adapter *adapter)
                hlist_for_each_entry_safe(tmp_fil, n, head, fnode) {
                        cmd =  tmp_fil->vlan_id ? QLCNIC_MAC_VLAN_DEL :
                                                  QLCNIC_MAC_DEL;
-                       time = tmp_fil->ftime;
-                       if (jiffies > (QLCNIC_FILTER_AGE * HZ + time)) {
+                       expires = tmp_fil->ftime + QLCNIC_FILTER_AGE * HZ;
+                       if (time_before(expires, jiffies)) {
                                qlcnic_sre_macaddr_change(adapter,
                                                          tmp_fil->faddr,
                                                          tmp_fil->vlan_id,
@@ -657,8 +647,8 @@ void qlcnic_prune_lb_filters(struct qlcnic_adapter *adapter)
 
                hlist_for_each_entry_safe(tmp_fil, n, head, fnode)
                {
-                       time = tmp_fil->ftime;
-                       if (jiffies > (QLCNIC_FILTER_AGE * HZ + time)) {
+                       expires = tmp_fil->ftime + QLCNIC_FILTER_AGE * HZ;
+                       if (time_before(expires, jiffies)) {
                                spin_lock_bh(&adapter->rx_mac_learn_lock);
                                adapter->rx_fhash.fnum--;
                                hlist_del(&tmp_fil->fnode);
index 173b3d12991f55a62751d5e6a213d20ee02c3174..e45bf09af0c9fe4dbe9cdc629370af792ab88c01 100644 (file)
@@ -305,7 +305,6 @@ static void qlcnic_send_filter(struct qlcnic_adapter *adapter,
 {
        struct vlan_ethhdr *vh = (struct vlan_ethhdr *)(skb->data);
        struct ethhdr *phdr = (struct ethhdr *)(skb->data);
-       struct net_device *netdev = adapter->netdev;
        u16 protocol = ntohs(skb->protocol);
        struct qlcnic_filter *fil, *tmp_fil;
        struct hlist_head *head;
@@ -314,27 +313,16 @@ static void qlcnic_send_filter(struct qlcnic_adapter *adapter,
        u16 vlan_id = 0;
        u8 hindex, hval;
 
-       if (!qlcnic_sriov_pf_check(adapter)) {
-               if (ether_addr_equal(phdr->h_source, adapter->mac_addr))
-                       return;
-       } else {
+       if (ether_addr_equal(phdr->h_source, adapter->mac_addr))
+               return;
+
+       if (adapter->flags & QLCNIC_VLAN_FILTERING) {
                if (protocol == ETH_P_8021Q) {
                        vh = (struct vlan_ethhdr *)skb->data;
                        vlan_id = ntohs(vh->h_vlan_TCI);
                } else if (vlan_tx_tag_present(skb)) {
                        vlan_id = vlan_tx_tag_get(skb);
                }
-
-               if (ether_addr_equal(phdr->h_source, adapter->mac_addr) &&
-                   !vlan_id)
-                       return;
-       }
-
-       if (adapter->fhash.fnum >= adapter->fhash.fmax) {
-               adapter->stats.mac_filter_limit_overrun++;
-               netdev_info(netdev, "Can not add more than %d mac-vlan filters, configured %d\n",
-                           adapter->fhash.fmax, adapter->fhash.fnum);
-               return;
        }
 
        memcpy(&src_addr, phdr->h_source, ETH_ALEN);
@@ -353,6 +341,11 @@ static void qlcnic_send_filter(struct qlcnic_adapter *adapter,
                }
        }
 
+       if (unlikely(adapter->fhash.fnum >= adapter->fhash.fmax)) {
+               adapter->stats.mac_filter_limit_overrun++;
+               return;
+       }
+
        fil = kzalloc(sizeof(struct qlcnic_filter), GFP_ATOMIC);
        if (!fil)
                return;
@@ -1216,8 +1209,7 @@ qlcnic_process_rcv(struct qlcnic_adapter *adapter,
        if (!skb)
                return buffer;
 
-       if (adapter->drv_mac_learn &&
-           (adapter->flags & QLCNIC_ESWITCH_ENABLED)) {
+       if (adapter->rx_mac_learn) {
                t_vid = 0;
                is_lb_pkt = qlcnic_82xx_is_lb_pkt(sts_data0);
                qlcnic_add_lb_filter(adapter, skb, is_lb_pkt, t_vid);
@@ -1293,8 +1285,7 @@ qlcnic_process_lro(struct qlcnic_adapter *adapter,
        if (!skb)
                return buffer;
 
-       if (adapter->drv_mac_learn &&
-           (adapter->flags & QLCNIC_ESWITCH_ENABLED)) {
+       if (adapter->rx_mac_learn) {
                t_vid = 0;
                is_lb_pkt = qlcnic_82xx_is_lb_pkt(sts_data0);
                qlcnic_add_lb_filter(adapter, skb, is_lb_pkt, t_vid);
index dbf75393f758a153ccecfe2a8e49edb13dc8ff9a..f0a285359e6654e5a796f85a69da025006392b94 100644 (file)
@@ -378,7 +378,8 @@ static int qlcnic_fdb_del(struct ndmsg *ndm, struct nlattr *tb[],
        if (!adapter->fdb_mac_learn)
                return ndo_dflt_fdb_del(ndm, tb, netdev, addr);
 
-       if (adapter->flags & QLCNIC_ESWITCH_ENABLED) {
+       if ((adapter->flags & QLCNIC_ESWITCH_ENABLED) ||
+           qlcnic_sriov_check(adapter)) {
                if (is_unicast_ether_addr(addr)) {
                        err = dev_uc_del(netdev, addr);
                        if (!err)
@@ -402,7 +403,8 @@ static int qlcnic_fdb_add(struct ndmsg *ndm, struct nlattr *tb[],
        if (!adapter->fdb_mac_learn)
                return ndo_dflt_fdb_add(ndm, tb, netdev, addr, flags);
 
-       if (!(adapter->flags & QLCNIC_ESWITCH_ENABLED)) {
+       if (!(adapter->flags & QLCNIC_ESWITCH_ENABLED) &&
+           !qlcnic_sriov_check(adapter)) {
                pr_info("%s: FDB e-switch is not enabled\n", __func__);
                return -EOPNOTSUPP;
        }
@@ -432,7 +434,8 @@ static int qlcnic_fdb_dump(struct sk_buff *skb, struct netlink_callback *ncb,
        if (!adapter->fdb_mac_learn)
                return ndo_dflt_fdb_dump(skb, ncb, netdev, idx);
 
-       if (adapter->flags & QLCNIC_ESWITCH_ENABLED)
+       if ((adapter->flags & QLCNIC_ESWITCH_ENABLED) ||
+           qlcnic_sriov_check(adapter))
                idx = ndo_dflt_fdb_dump(skb, ncb, netdev, idx);
 
        return idx;
@@ -690,10 +693,10 @@ int qlcnic_setup_tss_rss_intr(struct qlcnic_adapter *adapter)
                adapter->msix_entries[vector].entry = vector;
 
 restore:
-       err = pci_enable_msix(pdev, adapter->msix_entries, num_msix);
-       if (err > 0) {
+       err = pci_enable_msix_exact(pdev, adapter->msix_entries, num_msix);
+       if (err == -ENOSPC) {
                if (!adapter->drv_tss_rings && !adapter->drv_rss_rings)
-                       return -ENOSPC;
+                       return err;
 
                netdev_info(adapter->netdev,
                            "Unable to allocate %d MSI-X vectors, Available vectors %d\n",
@@ -1014,6 +1017,8 @@ int qlcnic_init_pci_info(struct qlcnic_adapter *adapter)
 
                if (pfn >= ahw->max_vnic_func) {
                        ret = QL_STATUS_INVALID_PARAM;
+                       dev_err(&adapter->pdev->dev, "%s: Invalid function 0x%x, max 0x%x\n",
+                               __func__, pfn, ahw->max_vnic_func);
                        goto err_eswitch;
                }
 
@@ -1915,8 +1920,6 @@ void __qlcnic_down(struct qlcnic_adapter *adapter, struct net_device *netdev)
        if (!test_and_clear_bit(__QLCNIC_DEV_UP, &adapter->state))
                return;
 
-       if (qlcnic_sriov_vf_check(adapter))
-               qlcnic_sriov_cleanup_async_list(&adapter->ahw->sriov->bc);
        smp_mb();
        netif_carrier_off(netdev);
        adapter->ahw->linkup = 0;
@@ -1928,6 +1931,8 @@ void __qlcnic_down(struct qlcnic_adapter *adapter, struct net_device *netdev)
                qlcnic_delete_lb_filters(adapter);
 
        qlcnic_nic_set_promisc(adapter, QLCNIC_NIU_NON_PROMISC_MODE);
+       if (qlcnic_sriov_vf_check(adapter))
+               qlcnic_sriov_cleanup_async_list(&adapter->ahw->sriov->bc);
 
        qlcnic_napi_disable(adapter);
 
@@ -2052,6 +2057,7 @@ out:
 
 static int qlcnic_alloc_adapter_resources(struct qlcnic_adapter *adapter)
 {
+       struct qlcnic_hardware_context *ahw = adapter->ahw;
        int err = 0;
 
        adapter->recv_ctx = kzalloc(sizeof(struct qlcnic_recv_context),
@@ -2061,6 +2067,18 @@ static int qlcnic_alloc_adapter_resources(struct qlcnic_adapter *adapter)
                goto err_out;
        }
 
+       if (qlcnic_83xx_check(adapter)) {
+               ahw->coal.type = QLCNIC_INTR_COAL_TYPE_RX_TX;
+               ahw->coal.tx_time_us = QLCNIC_DEF_INTR_COALESCE_TX_TIME_US;
+               ahw->coal.tx_packets = QLCNIC_DEF_INTR_COALESCE_TX_PACKETS;
+               ahw->coal.rx_time_us = QLCNIC_DEF_INTR_COALESCE_RX_TIME_US;
+               ahw->coal.rx_packets = QLCNIC_DEF_INTR_COALESCE_RX_PACKETS;
+       } else {
+               ahw->coal.type = QLCNIC_INTR_COAL_TYPE_RX;
+               ahw->coal.rx_time_us = QLCNIC_DEF_INTR_COALESCE_RX_TIME_US;
+               ahw->coal.rx_packets = QLCNIC_DEF_INTR_COALESCE_RX_PACKETS;
+       }
+
        /* clear stats */
        memset(&adapter->stats, 0, sizeof(adapter->stats));
 err_out:
@@ -2206,6 +2224,31 @@ static void qlcnic_82xx_set_mac_filter_count(struct qlcnic_adapter *adapter)
        ahw->max_uc_count = count;
 }
 
+static int qlcnic_set_real_num_queues(struct qlcnic_adapter *adapter,
+                                     u8 tx_queues, u8 rx_queues)
+{
+       struct net_device *netdev = adapter->netdev;
+       int err = 0;
+
+       if (tx_queues) {
+               err = netif_set_real_num_tx_queues(netdev, tx_queues);
+               if (err) {
+                       netdev_err(netdev, "failed to set %d Tx queues\n",
+                                  tx_queues);
+                       return err;
+               }
+       }
+
+       if (rx_queues) {
+               err = netif_set_real_num_rx_queues(netdev, rx_queues);
+               if (err)
+                       netdev_err(netdev, "failed to set %d Rx queues\n",
+                                  rx_queues);
+       }
+
+       return err;
+}
+
 int
 qlcnic_setup_netdev(struct qlcnic_adapter *adapter, struct net_device *netdev,
                    int pci_using_dac)
@@ -2222,10 +2265,8 @@ qlcnic_setup_netdev(struct qlcnic_adapter *adapter, struct net_device *netdev,
 
        qlcnic_change_mtu(netdev, netdev->mtu);
 
-       if (qlcnic_sriov_vf_check(adapter))
-               SET_ETHTOOL_OPS(netdev, &qlcnic_sriov_vf_ethtool_ops);
-       else
-               SET_ETHTOOL_OPS(netdev, &qlcnic_ethtool_ops);
+       netdev->ethtool_ops = (qlcnic_sriov_vf_check(adapter)) ?
+               &qlcnic_sriov_vf_ethtool_ops : &qlcnic_ethtool_ops;
 
        netdev->features |= (NETIF_F_SG | NETIF_F_IP_CSUM | NETIF_F_RXCSUM |
                             NETIF_F_IPV6_CSUM | NETIF_F_GRO |
@@ -2269,7 +2310,8 @@ qlcnic_setup_netdev(struct qlcnic_adapter *adapter, struct net_device *netdev,
        netdev->priv_flags |= IFF_UNICAST_FLT;
        netdev->irq = adapter->msix_entries[0].vector;
 
-       err = qlcnic_set_real_num_queues(adapter, netdev);
+       err = qlcnic_set_real_num_queues(adapter, adapter->drv_tx_rings,
+                                        adapter->drv_sds_rings);
        if (err)
                return err;
 
@@ -2374,6 +2416,14 @@ void qlcnic_set_drv_version(struct qlcnic_adapter *adapter)
                qlcnic_fw_cmd_set_drv_version(adapter, fw_cmd);
 }
 
+/* Reset firmware API lock */
+static void qlcnic_reset_api_lock(struct qlcnic_adapter *adapter)
+{
+       qlcnic_api_lock(adapter);
+       qlcnic_api_unlock(adapter);
+}
+
+
 static int
 qlcnic_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
 {
@@ -2383,9 +2433,6 @@ qlcnic_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
        int err, pci_using_dac = -1;
        char board_name[QLCNIC_MAX_BOARD_NAME_LEN + 19]; /* MAC + ": " + name */
 
-       if (pdev->is_virtfn)
-               return -ENODEV;
-
        err = pci_enable_device(pdev);
        if (err)
                return err;
@@ -2476,6 +2523,7 @@ qlcnic_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
        if (qlcnic_82xx_check(adapter)) {
                qlcnic_check_vf(adapter, ent);
                adapter->portnum = adapter->ahw->pci_func;
+               qlcnic_reset_api_lock(adapter);
                err = qlcnic_start_firmware(adapter);
                if (err) {
                        dev_err(&pdev->dev, "Loading fw failed.Please Reboot\n"
@@ -2517,9 +2565,11 @@ qlcnic_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
                        case -ENOMEM:
                                dev_err(&pdev->dev, "Adapter initialization failed. Please reboot\n");
                                goto err_out_free_hw;
+                       case -EOPNOTSUPP:
+                               dev_err(&pdev->dev, "Adapter initialization failed\n");
+                               goto err_out_free_hw;
                        default:
-                               dev_err(&pdev->dev, "Adapter initialization failed. A reboot may be required to recover from this failure\n");
-                               dev_err(&pdev->dev, "If reboot does not help to recover from this failure, try a flash update of the adapter\n");
+                               dev_err(&pdev->dev, "Adapter initialization failed. Driver will load in maintenance mode to recover the adapter using the application\n");
                                goto err_out_maintenance_mode;
                        }
                }
@@ -2593,7 +2643,7 @@ qlcnic_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
                qlcnic_alloc_lb_filters_mem(adapter);
 
        qlcnic_add_sysfs(adapter);
-
+       qlcnic_register_hwmon_dev(adapter);
        return 0;
 
 err_out_disable_mbx_intr:
@@ -2630,7 +2680,7 @@ err_out_disable_pdev:
 err_out_maintenance_mode:
        set_bit(__QLCNIC_MAINTENANCE_MODE, &adapter->state);
        netdev->netdev_ops = &qlcnic_netdev_failed_ops;
-       SET_ETHTOOL_OPS(netdev, &qlcnic_ethtool_failed_ops);
+       netdev->ethtool_ops = &qlcnic_ethtool_failed_ops;
        ahw->port_type = QLCNIC_XGBE;
 
        if (qlcnic_83xx_check(adapter))
@@ -2663,9 +2713,9 @@ static void qlcnic_remove(struct pci_dev *pdev)
                return;
 
        netdev = adapter->netdev;
-       qlcnic_sriov_pf_disable(adapter);
 
        qlcnic_cancel_idc_work(adapter);
+       qlcnic_sriov_pf_disable(adapter);
        ahw = adapter->ahw;
 
        unregister_netdev(netdev);
@@ -2700,6 +2750,8 @@ static void qlcnic_remove(struct pci_dev *pdev)
 
        qlcnic_remove_sysfs(adapter);
 
+       qlcnic_unregister_hwmon_dev(adapter);
+
        qlcnic_cleanup_pci_map(adapter->ahw);
 
        qlcnic_release_firmware(adapter);
@@ -2793,6 +2845,8 @@ static int qlcnic_close(struct net_device *netdev)
        return 0;
 }
 
+#define QLCNIC_VF_LB_BUCKET_SIZE 1
+
 void qlcnic_alloc_lb_filters_mem(struct qlcnic_adapter *adapter)
 {
        void *head;
@@ -2808,7 +2862,10 @@ void qlcnic_alloc_lb_filters_mem(struct qlcnic_adapter *adapter)
        spin_lock_init(&adapter->mac_learn_lock);
        spin_lock_init(&adapter->rx_mac_learn_lock);
 
-       if (qlcnic_82xx_check(adapter)) {
+       if (qlcnic_sriov_vf_check(adapter)) {
+               filter_size = QLCNIC_83XX_SRIOV_VF_MAX_MAC - 1;
+               adapter->fhash.fbucket_size = QLCNIC_VF_LB_BUCKET_SIZE;
+       } else if (qlcnic_82xx_check(adapter)) {
                filter_size = QLCNIC_LB_MAX_FILTERS;
                adapter->fhash.fbucket_size = QLCNIC_LB_BUCKET_SIZE;
        } else {
@@ -2934,9 +2991,13 @@ static void qlcnic_dump_tx_rings(struct qlcnic_adapter *adapter)
                            tx_ring->tx_stats.xmit_called,
                            tx_ring->tx_stats.xmit_on,
                            tx_ring->tx_stats.xmit_off);
+
+               if (tx_ring->crb_intr_mask)
+                       netdev_info(netdev, "crb_intr_mask=%d\n",
+                                   readl(tx_ring->crb_intr_mask));
+
                netdev_info(netdev,
-                           "crb_intr_mask=%d, hw_producer=%d, sw_producer=%d sw_consumer=%d, hw_consumer=%d\n",
-                           readl(tx_ring->crb_intr_mask),
+                           "hw_producer=%d, sw_producer=%d sw_consumer=%d, hw_consumer=%d\n",
                            readl(tx_ring->crb_cmd_producer),
                            tx_ring->producer, tx_ring->sw_consumer,
                            le32_to_cpu(*(tx_ring->hw_consumer)));
@@ -3969,12 +4030,21 @@ int qlcnic_validate_rings(struct qlcnic_adapter *adapter, __u32 ring_cnt,
 int qlcnic_setup_rings(struct qlcnic_adapter *adapter)
 {
        struct net_device *netdev = adapter->netdev;
+       u8 tx_rings, rx_rings;
        int err;
 
        if (test_bit(__QLCNIC_RESETTING, &adapter->state))
                return -EBUSY;
 
+       tx_rings = adapter->drv_tss_rings;
+       rx_rings = adapter->drv_rss_rings;
+
        netif_device_detach(netdev);
+
+       err = qlcnic_set_real_num_queues(adapter, tx_rings, rx_rings);
+       if (err)
+               goto done;
+
        if (netif_running(netdev))
                __qlcnic_down(adapter, netdev);
 
@@ -3994,7 +4064,17 @@ int qlcnic_setup_rings(struct qlcnic_adapter *adapter)
                return err;
        }
 
-       netif_set_real_num_tx_queues(netdev, adapter->drv_tx_rings);
+       /* Check if we need to update real_num_{tx|rx}_queues because
+        * qlcnic_setup_intr() may change Tx/Rx rings size
+        */
+       if ((tx_rings != adapter->drv_tx_rings) ||
+           (rx_rings != adapter->drv_sds_rings)) {
+               err = qlcnic_set_real_num_queues(adapter,
+                                                adapter->drv_tx_rings,
+                                                adapter->drv_sds_rings);
+               if (err)
+                       goto done;
+       }
 
        if (qlcnic_83xx_check(adapter)) {
                qlcnic_83xx_initialize_nic(adapter, 1);
@@ -4064,7 +4144,7 @@ void qlcnic_restore_indev_addr(struct net_device *netdev, unsigned long event)
 
        rcu_read_lock();
        for_each_set_bit(vid, adapter->vlans, VLAN_N_VID) {
-               dev = __vlan_find_dev_deep(netdev, htons(ETH_P_8021Q), vid);
+               dev = __vlan_find_dev_deep_rcu(netdev, htons(ETH_P_8021Q), vid);
                if (!dev)
                        continue;
                qlcnic_config_indev_addr(adapter, dev, event);
index 37b979b1266bc2e0aa552f84dd8a14a149c82665..f7694da8ed5dcbbc5ed0124427f7b303a91c23f3 100644 (file)
@@ -238,6 +238,8 @@ void qlcnic_82xx_cache_tmpl_hdr_values(struct qlcnic_fw_dump *fw_dump)
 
        hdr->drv_cap_mask = hdr->cap_mask;
        fw_dump->cap_mask = hdr->cap_mask;
+
+       fw_dump->use_pex_dma = (hdr->capabilities & BIT_0) ? true : false;
 }
 
 inline u32 qlcnic_82xx_get_cap_size(void *t_hdr, int index)
@@ -276,6 +278,8 @@ inline void qlcnic_83xx_set_saved_state(void *t_hdr, u32 index,
        hdr->saved_state[index] = value;
 }
 
+#define QLCNIC_TEMPLATE_VERSION (0x20001)
+
 void qlcnic_83xx_cache_tmpl_hdr_values(struct qlcnic_fw_dump *fw_dump)
 {
        struct qlcnic_83xx_dump_template_hdr *hdr;
@@ -288,6 +292,9 @@ void qlcnic_83xx_cache_tmpl_hdr_values(struct qlcnic_fw_dump *fw_dump)
 
        hdr->drv_cap_mask = hdr->cap_mask;
        fw_dump->cap_mask = hdr->cap_mask;
+
+       fw_dump->use_pex_dma = (fw_dump->version & 0xfffff) >=
+                              QLCNIC_TEMPLATE_VERSION;
 }
 
 inline u32 qlcnic_83xx_get_cap_size(void *t_hdr, int index)
@@ -658,29 +665,28 @@ out:
 static int qlcnic_start_pex_dma(struct qlcnic_adapter *adapter,
                                struct __mem *mem)
 {
-       struct qlcnic_83xx_dump_template_hdr *tmpl_hdr;
        struct device *dev = &adapter->pdev->dev;
        u32 dma_no, dma_base_addr, temp_addr;
        int i, ret, dma_sts;
+       void *tmpl_hdr;
 
        tmpl_hdr = adapter->ahw->fw_dump.tmpl_hdr;
-       dma_no = tmpl_hdr->saved_state[QLC_83XX_DMA_ENGINE_INDEX];
+       dma_no = qlcnic_get_saved_state(adapter, tmpl_hdr,
+                                       QLC_83XX_DMA_ENGINE_INDEX);
        dma_base_addr = QLC_DMA_REG_BASE_ADDR(dma_no);
 
        temp_addr = dma_base_addr + QLC_DMA_CMD_BUFF_ADDR_LOW;
-       ret = qlcnic_83xx_wrt_reg_indirect(adapter, temp_addr,
-                                          mem->desc_card_addr);
+       ret = qlcnic_ind_wr(adapter, temp_addr, mem->desc_card_addr);
        if (ret)
                return ret;
 
        temp_addr = dma_base_addr + QLC_DMA_CMD_BUFF_ADDR_HI;
-       ret = qlcnic_83xx_wrt_reg_indirect(adapter, temp_addr, 0);
+       ret = qlcnic_ind_wr(adapter, temp_addr, 0);
        if (ret)
                return ret;
 
        temp_addr = dma_base_addr + QLC_DMA_CMD_STATUS_CTRL;
-       ret = qlcnic_83xx_wrt_reg_indirect(adapter, temp_addr,
-                                          mem->start_dma_cmd);
+       ret = qlcnic_ind_wr(adapter, temp_addr, mem->start_dma_cmd);
        if (ret)
                return ret;
 
@@ -710,15 +716,16 @@ static u32 qlcnic_read_memory_pexdma(struct qlcnic_adapter *adapter,
        struct qlcnic_fw_dump *fw_dump = &adapter->ahw->fw_dump;
        u32 temp, dma_base_addr, size = 0, read_size = 0;
        struct qlcnic_pex_dma_descriptor *dma_descr;
-       struct qlcnic_83xx_dump_template_hdr *tmpl_hdr;
        struct device *dev = &adapter->pdev->dev;
        dma_addr_t dma_phys_addr;
        void *dma_buffer;
+       void *tmpl_hdr;
 
        tmpl_hdr = fw_dump->tmpl_hdr;
 
        /* Check if DMA engine is available */
-       temp = tmpl_hdr->saved_state[QLC_83XX_DMA_ENGINE_INDEX];
+       temp = qlcnic_get_saved_state(adapter, tmpl_hdr,
+                                     QLC_83XX_DMA_ENGINE_INDEX);
        dma_base_addr = QLC_DMA_REG_BASE_ADDR(temp);
        temp = qlcnic_ind_rd(adapter,
                             dma_base_addr + QLC_DMA_CMD_STATUS_CTRL);
@@ -764,8 +771,8 @@ static u32 qlcnic_read_memory_pexdma(struct qlcnic_adapter *adapter,
 
                /* Write DMA descriptor to MS memory*/
                temp = sizeof(struct qlcnic_pex_dma_descriptor) / 16;
-               *ret = qlcnic_83xx_ms_mem_write128(adapter, mem->desc_card_addr,
-                                                  (u32 *)dma_descr, temp);
+               *ret = qlcnic_ms_mem_write128(adapter, mem->desc_card_addr,
+                                             (u32 *)dma_descr, temp);
                if (*ret) {
                        dev_info(dev, "Failed to write DMA descriptor to MS memory at address 0x%x\n",
                                 mem->desc_card_addr);
@@ -1141,8 +1148,6 @@ free_mem:
        return err;
 }
 
-#define QLCNIC_TEMPLATE_VERSION (0x20001)
-
 int qlcnic_fw_cmd_get_minidump_temp(struct qlcnic_adapter *adapter)
 {
        struct qlcnic_hardware_context *ahw;
@@ -1203,12 +1208,6 @@ flash_temp:
                 "Default minidump capture mask 0x%x\n",
                 fw_dump->cap_mask);
 
-       if (qlcnic_83xx_check(adapter) &&
-           (fw_dump->version & 0xfffff) >= QLCNIC_TEMPLATE_VERSION)
-               fw_dump->use_pex_dma = true;
-       else
-               fw_dump->use_pex_dma = false;
-
        qlcnic_enable_fw_dump_state(adapter);
 
        return 0;
index 396bd1fd1d277deb56d70e185280ed3172902a49..335b50f7bd3eb99f7ee3e5bf9ce65babb56174ed 100644 (file)
@@ -52,6 +52,7 @@ enum qlcnic_bc_commands {
        QLCNIC_BC_CMD_CFG_GUEST_VLAN = 0x3,
 };
 
+#define QLCNIC_83XX_SRIOV_VF_MAX_MAC 2
 #define QLC_BC_CMD 1
 
 struct qlcnic_trans_list {
@@ -151,13 +152,14 @@ struct qlcnic_vf_info {
        struct qlcnic_trans_list        rcv_pend;
        struct qlcnic_adapter           *adapter;
        struct qlcnic_vport             *vp;
-       struct mutex                    vlan_list_lock; /* Lock for VLAN list */
+       spinlock_t                      vlan_list_lock; /* Lock for VLAN list */
 };
 
 struct qlcnic_async_work_list {
        struct list_head        list;
        struct work_struct      work;
        void                    *ptr;
+       struct qlcnic_cmd_args  *cmd;
 };
 
 struct qlcnic_back_channel {
index 0638c1810d54547df9eafb961439085dff4364a2..498fa6350c8d68ba4047976820e9a0158761f530 100644 (file)
@@ -39,6 +39,8 @@ static int qlcnic_sriov_channel_cfg_cmd(struct qlcnic_adapter *, u8);
 static void qlcnic_sriov_process_bc_cmd(struct work_struct *);
 static int qlcnic_sriov_vf_shutdown(struct pci_dev *);
 static int qlcnic_sriov_vf_resume(struct qlcnic_adapter *);
+static int qlcnic_sriov_async_issue_cmd(struct qlcnic_adapter *,
+                                       struct qlcnic_cmd_args *);
 
 static struct qlcnic_hardware_ops qlcnic_sriov_vf_hw_ops = {
        .read_crb                       = qlcnic_83xx_read_crb,
@@ -181,7 +183,7 @@ int qlcnic_sriov_init(struct qlcnic_adapter *adapter, int num_vfs)
                vf->adapter = adapter;
                vf->pci_func = qlcnic_sriov_virtid_fn(adapter, i);
                mutex_init(&vf->send_cmd_lock);
-               mutex_init(&vf->vlan_list_lock);
+               spin_lock_init(&vf->vlan_list_lock);
                INIT_LIST_HEAD(&vf->rcv_act.wait_list);
                INIT_LIST_HEAD(&vf->rcv_pend.wait_list);
                spin_lock_init(&vf->rcv_act.lock);
@@ -197,8 +199,9 @@ int qlcnic_sriov_init(struct qlcnic_adapter *adapter, int num_vfs)
                                goto qlcnic_destroy_async_wq;
                        }
                        sriov->vf_info[i].vp = vp;
+                       vp->vlan_mode = QLC_GUEST_VLAN_MODE;
                        vp->max_tx_bw = MAX_BW;
-                       vp->spoofchk = true;
+                       vp->spoofchk = false;
                        random_ether_addr(vp->mac);
                        dev_info(&adapter->pdev->dev,
                                 "MAC Address %pM is configured for VF %d\n",
@@ -515,6 +518,8 @@ static int qlcnic_sriov_setup_vf(struct qlcnic_adapter *adapter,
 {
        int err;
 
+       adapter->flags |= QLCNIC_VLAN_FILTERING;
+       adapter->ahw->total_nic_func = 1;
        INIT_LIST_HEAD(&adapter->vf_mc_list);
        if (!qlcnic_use_msi_x && !!qlcnic_use_msi)
                dev_warn(&adapter->pdev->dev,
@@ -770,6 +775,7 @@ static int qlcnic_sriov_prepare_bc_hdr(struct qlcnic_bc_trans *trans,
                cmd->req.arg = (u32 *)trans->req_pay;
                cmd->rsp.arg = (u32 *)trans->rsp_pay;
                cmd_op = cmd->req.arg[0] & 0xff;
+               cmd->cmd_op = cmd_op;
                remainder = (trans->rsp_pay_size) % (bc_pay_sz);
                num_frags = (trans->rsp_pay_size) / (bc_pay_sz);
                if (remainder)
@@ -1356,7 +1362,7 @@ static int qlcnic_sriov_retry_bc_cmd(struct qlcnic_adapter *adapter,
        return -EIO;
 }
 
-static int qlcnic_sriov_issue_cmd(struct qlcnic_adapter *adapter,
+static int __qlcnic_sriov_issue_cmd(struct qlcnic_adapter *adapter,
                                  struct qlcnic_cmd_args *cmd)
 {
        struct qlcnic_hardware_context *ahw = adapter->ahw;
@@ -1370,7 +1376,7 @@ static int qlcnic_sriov_issue_cmd(struct qlcnic_adapter *adapter,
 
        rsp = qlcnic_sriov_alloc_bc_trans(&trans);
        if (rsp)
-               return rsp;
+               goto free_cmd;
 
        rsp = qlcnic_sriov_prepare_bc_hdr(trans, cmd, seq, QLC_BC_COMMAND);
        if (rsp)
@@ -1408,12 +1414,17 @@ retry:
            (mbx_err_code == QLCNIC_MBX_PORT_RSP_OK)) {
                rsp = QLCNIC_RCODE_SUCCESS;
        } else {
-               rsp = mbx_err_code;
-               if (!rsp)
-                       rsp = 1;
-               dev_err(dev,
-                       "MBX command 0x%x failed with err:0x%x for VF %d\n",
-                       opcode, mbx_err_code, func);
+               if (cmd->type == QLC_83XX_MBX_CMD_NO_WAIT) {
+                       rsp = QLCNIC_RCODE_SUCCESS;
+               } else {
+                       rsp = mbx_err_code;
+                       if (!rsp)
+                               rsp = 1;
+
+                       dev_err(dev,
+                               "MBX command 0x%x failed with err:0x%x for VF %d\n",
+                               opcode, mbx_err_code, func);
+               }
        }
 
 err_out:
@@ -1425,9 +1436,26 @@ err_out:
 
 cleanup_transaction:
        qlcnic_sriov_cleanup_transaction(trans);
+
+free_cmd:
+       if (cmd->type == QLC_83XX_MBX_CMD_NO_WAIT) {
+               qlcnic_free_mbx_args(cmd);
+               kfree(cmd);
+       }
+
        return rsp;
 }
 
+
+static int qlcnic_sriov_issue_cmd(struct qlcnic_adapter *adapter,
+                                 struct qlcnic_cmd_args *cmd)
+{
+       if (cmd->type == QLC_83XX_MBX_CMD_NO_WAIT)
+               return qlcnic_sriov_async_issue_cmd(adapter, cmd);
+       else
+               return __qlcnic_sriov_issue_cmd(adapter, cmd);
+}
+
 static int qlcnic_sriov_channel_cfg_cmd(struct qlcnic_adapter *adapter, u8 cmd_op)
 {
        struct qlcnic_cmd_args cmd;
@@ -1458,58 +1486,28 @@ out:
        return ret;
 }
 
-static void qlcnic_vf_add_mc_list(struct net_device *netdev)
+static void qlcnic_vf_add_mc_list(struct net_device *netdev, const u8 *mac)
 {
        struct qlcnic_adapter *adapter = netdev_priv(netdev);
        struct qlcnic_sriov *sriov = adapter->ahw->sriov;
-       struct qlcnic_mac_vlan_list *cur;
-       struct list_head *head, tmp_list;
        struct qlcnic_vf_info *vf;
        u16 vlan_id;
        int i;
 
-       static const u8 bcast_addr[ETH_ALEN] = {
-               0xff, 0xff, 0xff, 0xff, 0xff, 0xff
-       };
-
        vf = &adapter->ahw->sriov->vf_info[0];
-       INIT_LIST_HEAD(&tmp_list);
-       head = &adapter->vf_mc_list;
-       netif_addr_lock_bh(netdev);
-
-       while (!list_empty(head)) {
-               cur = list_entry(head->next, struct qlcnic_mac_vlan_list, list);
-               list_move(&cur->list, &tmp_list);
-       }
-
-       netif_addr_unlock_bh(netdev);
 
-       while (!list_empty(&tmp_list)) {
-               cur = list_entry((&tmp_list)->next,
-                                struct qlcnic_mac_vlan_list, list);
-               if (!qlcnic_sriov_check_any_vlan(vf)) {
-                       qlcnic_nic_add_mac(adapter, bcast_addr, 0);
-                       qlcnic_nic_add_mac(adapter, cur->mac_addr, 0);
-               } else {
-                       mutex_lock(&vf->vlan_list_lock);
-                       for (i = 0; i < sriov->num_allowed_vlans; i++) {
-                               vlan_id = vf->sriov_vlans[i];
-                               if (vlan_id) {
-                                       qlcnic_nic_add_mac(adapter, bcast_addr,
-                                                          vlan_id);
-                                       qlcnic_nic_add_mac(adapter,
-                                                          cur->mac_addr,
-                                                          vlan_id);
-                               }
-                       }
-                       mutex_unlock(&vf->vlan_list_lock);
-                       if (qlcnic_84xx_check(adapter)) {
-                               qlcnic_nic_add_mac(adapter, bcast_addr, 0);
-                               qlcnic_nic_add_mac(adapter, cur->mac_addr, 0);
-                       }
+       if (!qlcnic_sriov_check_any_vlan(vf)) {
+               qlcnic_nic_add_mac(adapter, mac, 0);
+       } else {
+               spin_lock(&vf->vlan_list_lock);
+               for (i = 0; i < sriov->num_allowed_vlans; i++) {
+                       vlan_id = vf->sriov_vlans[i];
+                       if (vlan_id)
+                               qlcnic_nic_add_mac(adapter, mac, vlan_id);
                }
-               list_del(&cur->list);
-               kfree(cur);
+               spin_unlock(&vf->vlan_list_lock);
+               if (qlcnic_84xx_check(adapter))
+                       qlcnic_nic_add_mac(adapter, mac, 0);
        }
 }
 
@@ -1518,6 +1516,7 @@ void qlcnic_sriov_cleanup_async_list(struct qlcnic_back_channel *bc)
        struct list_head *head = &bc->async_list;
        struct qlcnic_async_work_list *entry;
 
+       flush_workqueue(bc->bc_async_wq);
        while (!list_empty(head)) {
                entry = list_entry(head->next, struct qlcnic_async_work_list,
                                   list);
@@ -1527,10 +1526,14 @@ void qlcnic_sriov_cleanup_async_list(struct qlcnic_back_channel *bc)
        }
 }
 
-static void qlcnic_sriov_vf_set_multi(struct net_device *netdev)
+void qlcnic_sriov_vf_set_multi(struct net_device *netdev)
 {
        struct qlcnic_adapter *adapter = netdev_priv(netdev);
        struct qlcnic_hardware_context *ahw = adapter->ahw;
+       static const u8 bcast_addr[ETH_ALEN] = {
+               0xff, 0xff, 0xff, 0xff, 0xff, 0xff
+       };
+       struct netdev_hw_addr *ha;
        u32 mode = VPORT_MISS_MODE_DROP;
 
        if (!test_bit(__QLCNIC_FW_ATTACHED, &adapter->state))
@@ -1542,23 +1545,49 @@ static void qlcnic_sriov_vf_set_multi(struct net_device *netdev)
        } else if ((netdev->flags & IFF_ALLMULTI) ||
                   (netdev_mc_count(netdev) > ahw->max_mc_count)) {
                mode = VPORT_MISS_MODE_ACCEPT_MULTI;
+       } else {
+               qlcnic_vf_add_mc_list(netdev, bcast_addr);
+               if (!netdev_mc_empty(netdev)) {
+                       netdev_for_each_mc_addr(ha, netdev)
+                               qlcnic_vf_add_mc_list(netdev, ha->addr);
+               }
        }
 
-       if (qlcnic_sriov_vf_check(adapter))
-               qlcnic_vf_add_mc_list(netdev);
+       /* configure unicast MAC address, if there is not sufficient space
+        * to store all the unicast addresses then enable promiscuous mode
+        */
+       if (netdev_uc_count(netdev) > ahw->max_uc_count) {
+               mode = VPORT_MISS_MODE_ACCEPT_ALL;
+       } else if (!netdev_uc_empty(netdev)) {
+               netdev_for_each_uc_addr(ha, netdev)
+                       qlcnic_vf_add_mc_list(netdev, ha->addr);
+       }
+
+       if (adapter->pdev->is_virtfn) {
+               if (mode == VPORT_MISS_MODE_ACCEPT_ALL &&
+                   !adapter->fdb_mac_learn) {
+                       qlcnic_alloc_lb_filters_mem(adapter);
+                       adapter->drv_mac_learn = 1;
+                       adapter->rx_mac_learn = true;
+               } else {
+                       adapter->drv_mac_learn = 0;
+                       adapter->rx_mac_learn = false;
+               }
+       }
 
        qlcnic_nic_set_promisc(adapter, mode);
 }
 
-static void qlcnic_sriov_handle_async_multi(struct work_struct *work)
+static void qlcnic_sriov_handle_async_issue_cmd(struct work_struct *work)
 {
        struct qlcnic_async_work_list *entry;
-       struct net_device *netdev;
+       struct qlcnic_adapter *adapter;
+       struct qlcnic_cmd_args *cmd;
 
        entry = container_of(work, struct qlcnic_async_work_list, work);
-       netdev = (struct net_device *)entry->ptr;
-
-       qlcnic_sriov_vf_set_multi(netdev);
+       adapter = entry->ptr;
+       cmd = entry->cmd;
+       __qlcnic_sriov_issue_cmd(adapter, cmd);
        return;
 }
 
@@ -1588,8 +1617,9 @@ qlcnic_sriov_get_free_node_async_work(struct qlcnic_back_channel *bc)
        return entry;
 }
 
-static void qlcnic_sriov_schedule_bc_async_work(struct qlcnic_back_channel *bc,
-                                               work_func_t func, void *data)
+static void qlcnic_sriov_schedule_async_cmd(struct qlcnic_back_channel *bc,
+                                           work_func_t func, void *data,
+                                           struct qlcnic_cmd_args *cmd)
 {
        struct qlcnic_async_work_list *entry = NULL;
 
@@ -1598,21 +1628,23 @@ static void qlcnic_sriov_schedule_bc_async_work(struct qlcnic_back_channel *bc,
                return;
 
        entry->ptr = data;
+       entry->cmd = cmd;
        INIT_WORK(&entry->work, func);
        queue_work(bc->bc_async_wq, &entry->work);
 }
 
-void qlcnic_sriov_vf_schedule_multi(struct net_device *netdev)
+static int qlcnic_sriov_async_issue_cmd(struct qlcnic_adapter *adapter,
+                                       struct qlcnic_cmd_args *cmd)
 {
 
-       struct qlcnic_adapter *adapter = netdev_priv(netdev);
        struct qlcnic_back_channel *bc = &adapter->ahw->sriov->bc;
 
        if (adapter->need_fw_reset)
-               return;
+               return -EIO;
 
-       qlcnic_sriov_schedule_bc_async_work(bc, qlcnic_sriov_handle_async_multi,
-                                           netdev);
+       qlcnic_sriov_schedule_async_cmd(bc, qlcnic_sriov_handle_async_issue_cmd,
+                                       adapter, cmd);
+       return 0;
 }
 
 static int qlcnic_sriov_vf_reinit_driver(struct qlcnic_adapter *adapter)
@@ -1836,6 +1868,12 @@ static int qlcnic_sriov_vf_idc_unknown_state(struct qlcnic_adapter *adapter)
        return 0;
 }
 
+static void qlcnic_sriov_vf_periodic_tasks(struct qlcnic_adapter *adapter)
+{
+       if (adapter->fhash.fnum)
+               qlcnic_prune_lb_filters(adapter);
+}
+
 static void qlcnic_sriov_vf_poll_dev_state(struct work_struct *work)
 {
        struct qlcnic_adapter *adapter;
@@ -1867,6 +1905,8 @@ static void qlcnic_sriov_vf_poll_dev_state(struct work_struct *work)
        }
 
        idc->prev_state = idc->curr_state;
+       qlcnic_sriov_vf_periodic_tasks(adapter);
+
        if (!ret && test_bit(QLC_83XX_MODULE_LOADED, &idc->status))
                qlcnic_schedule_work(adapter, qlcnic_sriov_vf_poll_dev_state,
                                     idc->delay);
@@ -1890,7 +1930,7 @@ static int qlcnic_sriov_check_vlan_id(struct qlcnic_sriov *sriov,
        if (!vf->sriov_vlans)
                return err;
 
-       mutex_lock(&vf->vlan_list_lock);
+       spin_lock_bh(&vf->vlan_list_lock);
 
        for (i = 0; i < sriov->num_allowed_vlans; i++) {
                if (vf->sriov_vlans[i] == vlan_id) {
@@ -1899,7 +1939,7 @@ static int qlcnic_sriov_check_vlan_id(struct qlcnic_sriov *sriov,
                }
        }
 
-       mutex_unlock(&vf->vlan_list_lock);
+       spin_unlock_bh(&vf->vlan_list_lock);
        return err;
 }
 
@@ -1908,12 +1948,12 @@ static int qlcnic_sriov_validate_num_vlans(struct qlcnic_sriov *sriov,
 {
        int err = 0;
 
-       mutex_lock(&vf->vlan_list_lock);
+       spin_lock_bh(&vf->vlan_list_lock);
 
        if (vf->num_vlan >= sriov->num_allowed_vlans)
                err = -EINVAL;
 
-       mutex_unlock(&vf->vlan_list_lock);
+       spin_unlock_bh(&vf->vlan_list_lock);
        return err;
 }
 
@@ -1966,7 +2006,7 @@ static void qlcnic_sriov_vlan_operation(struct qlcnic_vf_info *vf, u16 vlan_id,
        if (!vf->sriov_vlans)
                return;
 
-       mutex_lock(&vf->vlan_list_lock);
+       spin_lock_bh(&vf->vlan_list_lock);
 
        switch (opcode) {
        case QLC_VLAN_ADD:
@@ -1979,7 +2019,7 @@ static void qlcnic_sriov_vlan_operation(struct qlcnic_vf_info *vf, u16 vlan_id,
                netdev_err(adapter->netdev, "Invalid VLAN operation\n");
        }
 
-       mutex_unlock(&vf->vlan_list_lock);
+       spin_unlock_bh(&vf->vlan_list_lock);
        return;
 }
 
@@ -1987,6 +2027,7 @@ int qlcnic_sriov_cfg_vf_guest_vlan(struct qlcnic_adapter *adapter,
                                   u16 vid, u8 enable)
 {
        struct qlcnic_sriov *sriov = adapter->ahw->sriov;
+       struct net_device *netdev = adapter->netdev;
        struct qlcnic_vf_info *vf;
        struct qlcnic_cmd_args cmd;
        int ret;
@@ -2012,14 +2053,18 @@ int qlcnic_sriov_cfg_vf_guest_vlan(struct qlcnic_adapter *adapter,
                dev_err(&adapter->pdev->dev,
                        "Failed to configure guest VLAN, err=%d\n", ret);
        } else {
+               netif_addr_lock_bh(netdev);
                qlcnic_free_mac_list(adapter);
+               netif_addr_unlock_bh(netdev);
 
                if (enable)
                        qlcnic_sriov_vlan_operation(vf, vid, QLC_VLAN_ADD);
                else
                        qlcnic_sriov_vlan_operation(vf, vid, QLC_VLAN_DELETE);
 
-               qlcnic_set_multi(adapter->netdev);
+               netif_addr_lock_bh(netdev);
+               qlcnic_set_multi(netdev);
+               netif_addr_unlock_bh(netdev);
        }
 
        qlcnic_free_mbx_args(&cmd);
@@ -2150,11 +2195,11 @@ bool qlcnic_sriov_check_any_vlan(struct qlcnic_vf_info *vf)
 {
        bool err = false;
 
-       mutex_lock(&vf->vlan_list_lock);
+       spin_lock_bh(&vf->vlan_list_lock);
 
        if (vf->num_vlan)
                err = true;
 
-       mutex_unlock(&vf->vlan_list_lock);
+       spin_unlock_bh(&vf->vlan_list_lock);
        return err;
 }
index 2801379915447dc54683719c40058d9d89f8387f..6d2f72f114f2b15333c06e1d87901666e0af9a22 100644 (file)
@@ -16,6 +16,7 @@
 #define QLC_VF_FLOOD_BIT       BIT_16
 #define QLC_FLOOD_MODE         0x5
 #define QLC_SRIOV_ALLOW_VLAN0  BIT_19
+#define QLC_INTR_COAL_TYPE_MASK        0x7
 
 static int qlcnic_sriov_pf_get_vport_handle(struct qlcnic_adapter *, u8);
 
@@ -83,7 +84,7 @@ static int qlcnic_sriov_pf_cal_res_limit(struct qlcnic_adapter *adapter,
        info->max_tx_ques = res->num_tx_queues / max;
 
        if (qlcnic_83xx_pf_check(adapter))
-               num_macs = 1;
+               num_macs = QLCNIC_83XX_SRIOV_VF_MAX_MAC;
 
        info->max_rx_mcast_mac_filters = res->num_rx_mcast_mac_filters;
 
@@ -337,9 +338,12 @@ static int qlcnic_sriov_pf_cfg_vlan_filtering(struct qlcnic_adapter *adapter,
 
        cmd.req.arg[1] = 0x4;
        if (enable) {
+               adapter->flags |= QLCNIC_VLAN_FILTERING;
                cmd.req.arg[1] |= BIT_16;
                if (qlcnic_84xx_check(adapter))
                        cmd.req.arg[1] |= QLC_SRIOV_ALLOW_VLAN0;
+       } else {
+               adapter->flags &= ~QLCNIC_VLAN_FILTERING;
        }
 
        err = qlcnic_issue_cmd(adapter, &cmd);
@@ -471,12 +475,12 @@ static int qlcnic_pci_sriov_disable(struct qlcnic_adapter *adapter)
                return -EPERM;
        }
 
+       qlcnic_sriov_pf_disable(adapter);
+
        rtnl_lock();
        if (netif_running(netdev))
                __qlcnic_down(adapter, netdev);
 
-       qlcnic_sriov_pf_disable(adapter);
-
        qlcnic_sriov_free_vlans(adapter);
 
        qlcnic_sriov_pf_cleanup(adapter);
@@ -595,7 +599,6 @@ static int __qlcnic_pci_sriov_enable(struct qlcnic_adapter *adapter,
 
        qlcnic_sriov_alloc_vlans(adapter);
 
-       err = qlcnic_sriov_pf_enable(adapter, num_vfs);
        return err;
 
 del_flr_queue:
@@ -626,25 +629,36 @@ static int qlcnic_pci_sriov_enable(struct qlcnic_adapter *adapter, int num_vfs)
                __qlcnic_down(adapter, netdev);
 
        err = __qlcnic_pci_sriov_enable(adapter, num_vfs);
-       if (err) {
-               netdev_info(netdev, "Failed to enable SR-IOV on port %d\n",
-                           adapter->portnum);
+       if (err)
+               goto error;
 
-               err = -EIO;
-               if (qlcnic_83xx_configure_opmode(adapter))
-                       goto error;
-       } else {
+       if (netif_running(netdev))
+               __qlcnic_up(adapter, netdev);
+
+       rtnl_unlock();
+       err = qlcnic_sriov_pf_enable(adapter, num_vfs);
+       if (!err) {
                netdev_info(netdev,
                            "SR-IOV is enabled successfully on port %d\n",
                            adapter->portnum);
                /* Return number of vfs enabled */
-               err = num_vfs;
+               return num_vfs;
        }
+
+       rtnl_lock();
        if (netif_running(netdev))
-               __qlcnic_up(adapter, netdev);
+               __qlcnic_down(adapter, netdev);
 
 error:
+       if (!qlcnic_83xx_configure_opmode(adapter)) {
+               if (netif_running(netdev))
+                       __qlcnic_up(adapter, netdev);
+       }
+
        rtnl_unlock();
+       netdev_info(netdev, "Failed to enable SR-IOV on port %d\n",
+                   adapter->portnum);
+
        return err;
 }
 
@@ -773,7 +787,7 @@ static int qlcnic_sriov_cfg_vf_def_mac(struct qlcnic_adapter *adapter,
                                       struct qlcnic_vf_info *vf,
                                       u16 vlan, u8 op)
 {
-       struct qlcnic_cmd_args cmd;
+       struct qlcnic_cmd_args *cmd;
        struct qlcnic_macvlan_mbx mv;
        struct qlcnic_vport *vp;
        u8 *addr;
@@ -783,21 +797,27 @@ static int qlcnic_sriov_cfg_vf_def_mac(struct qlcnic_adapter *adapter,
 
        vp = vf->vp;
 
-       if (qlcnic_alloc_mbx_args(&cmd, adapter, QLCNIC_CMD_CONFIG_MAC_VLAN))
+       cmd = kzalloc(sizeof(*cmd), GFP_ATOMIC);
+       if (!cmd)
                return -ENOMEM;
 
+       err = qlcnic_alloc_mbx_args(cmd, adapter, QLCNIC_CMD_CONFIG_MAC_VLAN);
+       if (err)
+               goto free_cmd;
+
+       cmd->type = QLC_83XX_MBX_CMD_NO_WAIT;
        vpid = qlcnic_sriov_pf_get_vport_handle(adapter, vf->pci_func);
        if (vpid < 0) {
                err = -EINVAL;
-               goto out;
+               goto free_args;
        }
 
        if (vlan)
                op = ((op == QLCNIC_MAC_ADD || op == QLCNIC_MAC_VLAN_ADD) ?
                      QLCNIC_MAC_VLAN_ADD : QLCNIC_MAC_VLAN_DEL);
 
-       cmd.req.arg[1] = op | (1 << 8) | (3 << 6);
-       cmd.req.arg[1] |= ((vpid & 0xffff) << 16) | BIT_31;
+       cmd->req.arg[1] = op | (1 << 8) | (3 << 6);
+       cmd->req.arg[1] |= ((vpid & 0xffff) << 16) | BIT_31;
 
        addr = vp->mac;
        mv.vlan = vlan;
@@ -807,18 +827,18 @@ static int qlcnic_sriov_cfg_vf_def_mac(struct qlcnic_adapter *adapter,
        mv.mac_addr3 = addr[3];
        mv.mac_addr4 = addr[4];
        mv.mac_addr5 = addr[5];
-       buf = &cmd.req.arg[2];
+       buf = &cmd->req.arg[2];
        memcpy(buf, &mv, sizeof(struct qlcnic_macvlan_mbx));
 
-       err = qlcnic_issue_cmd(adapter, &cmd);
+       err = qlcnic_issue_cmd(adapter, cmd);
 
-       if (err)
-               dev_err(&adapter->pdev->dev,
-                       "MAC-VLAN %s to CAM failed, err=%d.\n",
-                       ((op == 1) ? "add " : "delete "), err);
+       if (!err)
+               return err;
 
-out:
-       qlcnic_free_mbx_args(&cmd);
+free_args:
+       qlcnic_free_mbx_args(cmd);
+free_cmd:
+       kfree(cmd);
        return err;
 }
 
@@ -840,7 +860,7 @@ static void qlcnic_83xx_cfg_default_mac_vlan(struct qlcnic_adapter *adapter,
 
        sriov = adapter->ahw->sriov;
 
-       mutex_lock(&vf->vlan_list_lock);
+       spin_lock_bh(&vf->vlan_list_lock);
        if (vf->num_vlan) {
                for (i = 0; i < sriov->num_allowed_vlans; i++) {
                        vlan = vf->sriov_vlans[i];
@@ -849,7 +869,7 @@ static void qlcnic_83xx_cfg_default_mac_vlan(struct qlcnic_adapter *adapter,
                                                            opcode);
                }
        }
-       mutex_unlock(&vf->vlan_list_lock);
+       spin_unlock_bh(&vf->vlan_list_lock);
 
        if (vf->vp->vlan_mode != QLC_PVID_MODE) {
                if (qlcnic_83xx_pf_check(adapter) &&
@@ -1178,19 +1198,41 @@ static int qlcnic_sriov_validate_cfg_intrcoal(struct qlcnic_adapter *adapter,
 {
        struct qlcnic_nic_intr_coalesce *coal = &adapter->ahw->coal;
        u16 ctx_id, pkts, time;
+       int err = -EINVAL;
+       u8 type;
 
+       type = cmd->req.arg[1] & QLC_INTR_COAL_TYPE_MASK;
        ctx_id = cmd->req.arg[1] >> 16;
        pkts = cmd->req.arg[2] & 0xffff;
        time = cmd->req.arg[2] >> 16;
 
-       if (ctx_id != vf->rx_ctx_id)
-               return -EINVAL;
-       if (pkts > coal->rx_packets)
-               return -EINVAL;
-       if (time < coal->rx_time_us)
-               return -EINVAL;
+       switch (type) {
+       case QLCNIC_INTR_COAL_TYPE_RX:
+               if (ctx_id != vf->rx_ctx_id || pkts > coal->rx_packets ||
+                   time < coal->rx_time_us)
+                       goto err_label;
+               break;
+       case QLCNIC_INTR_COAL_TYPE_TX:
+               if (ctx_id != vf->tx_ctx_id || pkts > coal->tx_packets ||
+                   time < coal->tx_time_us)
+                       goto err_label;
+               break;
+       default:
+               netdev_err(adapter->netdev, "Invalid coalescing type 0x%x received\n",
+                          type);
+               return err;
+       }
 
        return 0;
+
+err_label:
+       netdev_err(adapter->netdev, "Expected: rx_ctx_id 0x%x rx_packets 0x%x rx_time_us 0x%x tx_ctx_id 0x%x tx_packets 0x%x tx_time_us 0x%x\n",
+                  vf->rx_ctx_id, coal->rx_packets, coal->rx_time_us,
+                  vf->tx_ctx_id, coal->tx_packets, coal->tx_time_us);
+       netdev_err(adapter->netdev, "Received: ctx_id 0x%x packets 0x%x time_us 0x%x type 0x%x\n",
+                  ctx_id, pkts, time, type);
+
+       return err;
 }
 
 static int qlcnic_sriov_pf_cfg_intrcoal_cmd(struct qlcnic_bc_trans *tran,
@@ -1214,7 +1256,6 @@ static int qlcnic_sriov_validate_cfg_macvlan(struct qlcnic_adapter *adapter,
                                             struct qlcnic_vf_info *vf,
                                             struct qlcnic_cmd_args *cmd)
 {
-       struct qlcnic_macvlan_mbx *macvlan;
        struct qlcnic_vport *vp = vf->vp;
        u8 op, new_op;
 
@@ -1224,14 +1265,6 @@ static int qlcnic_sriov_validate_cfg_macvlan(struct qlcnic_adapter *adapter,
        cmd->req.arg[1] |= (vf->vp->handle << 16);
        cmd->req.arg[1] |= BIT_31;
 
-       macvlan = (struct qlcnic_macvlan_mbx *)&cmd->req.arg[2];
-       if (!(macvlan->mac_addr0 & BIT_0)) {
-               dev_err(&adapter->pdev->dev,
-                       "MAC address change is not allowed from VF %d",
-                       vf->pci_func);
-               return -EINVAL;
-       }
-
        if (vp->vlan_mode == QLC_PVID_MODE) {
                op = cmd->req.arg[1] & 0x7;
                cmd->req.arg[1] &= ~0x7;
index cd346e27f2e1270078a7580c5e563bc178d5ef37..f5786d5792df06fe16db6f7ffd2276f9bdabe96f 100644 (file)
 #include <linux/sysfs.h>
 #include <linux/aer.h>
 #include <linux/log2.h>
+#ifdef CONFIG_QLCNIC_HWMON
+#include <linux/hwmon.h>
+#include <linux/hwmon-sysfs.h>
+#endif
 
 #define QLC_STATUS_UNSUPPORTED_CMD     -2
 
@@ -358,6 +362,8 @@ int qlcnic_is_valid_nic_func(struct qlcnic_adapter *adapter, u8 pci_func)
                if (adapter->npars[i].pci_func == pci_func)
                        return i;
        }
+
+       dev_err(&adapter->pdev->dev, "%s: Invalid nic function\n", __func__);
        return -EINVAL;
 }
 
@@ -1243,6 +1249,68 @@ static struct bin_attribute bin_attr_flash = {
        .write = qlcnic_83xx_sysfs_flash_write_handler,
 };
 
+#ifdef CONFIG_QLCNIC_HWMON
+
+static ssize_t qlcnic_hwmon_show_temp(struct device *dev,
+                                     struct device_attribute *dev_attr,
+                                     char *buf)
+{
+       struct qlcnic_adapter *adapter = dev_get_drvdata(dev);
+       unsigned int temperature = 0, value = 0;
+
+       if (qlcnic_83xx_check(adapter))
+               value = QLCRDX(adapter->ahw, QLC_83XX_ASIC_TEMP);
+       else if (qlcnic_82xx_check(adapter))
+               value = QLC_SHARED_REG_RD32(adapter, QLCNIC_ASIC_TEMP);
+
+       temperature = qlcnic_get_temp_val(value);
+       /* display millidegree celcius */
+       temperature *= 1000;
+       return sprintf(buf, "%u\n", temperature);
+}
+
+/* hwmon-sysfs attributes */
+static SENSOR_DEVICE_ATTR(temp1_input, S_IRUGO,
+                         qlcnic_hwmon_show_temp, NULL, 1);
+
+static struct attribute *qlcnic_hwmon_attrs[] = {
+       &sensor_dev_attr_temp1_input.dev_attr.attr,
+       NULL
+};
+
+ATTRIBUTE_GROUPS(qlcnic_hwmon);
+
+void qlcnic_register_hwmon_dev(struct qlcnic_adapter *adapter)
+{
+       struct device *dev = &adapter->pdev->dev;
+       struct device *hwmon_dev;
+
+       /* Skip hwmon registration for a VF device */
+       if (qlcnic_sriov_vf_check(adapter)) {
+               adapter->ahw->hwmon_dev = NULL;
+               return;
+       }
+       hwmon_dev = hwmon_device_register_with_groups(dev, qlcnic_driver_name,
+                                                     adapter,
+                                                     qlcnic_hwmon_groups);
+       if (IS_ERR(hwmon_dev)) {
+               dev_err(dev, "Cannot register with hwmon, err=%ld\n",
+                       PTR_ERR(hwmon_dev));
+               hwmon_dev = NULL;
+       }
+       adapter->ahw->hwmon_dev = hwmon_dev;
+}
+
+void qlcnic_unregister_hwmon_dev(struct qlcnic_adapter *adapter)
+{
+       struct device *hwmon_dev = adapter->ahw->hwmon_dev;
+       if (hwmon_dev) {
+               hwmon_device_unregister(hwmon_dev);
+               adapter->ahw->hwmon_dev = NULL;
+       }
+}
+#endif
+
 void qlcnic_create_sysfs_entries(struct qlcnic_adapter *adapter)
 {
        struct device *dev = &adapter->pdev->dev;
index 0a1d76acab8171929e3c6f9ad6139b48484ebbcc..b40050e03a56f7524e19dc7c215165d0a0c75585 100644 (file)
@@ -3595,7 +3595,7 @@ static int ql_request_irq(struct ql_adapter *qdev)
        }
        return status;
 err_irq:
-       netif_err(qdev, ifup, qdev->ndev, "Failed to get the interrupts!!!/n");
+       netif_err(qdev, ifup, qdev->ndev, "Failed to get the interrupts!!!\n");
        ql_free_irq(qdev);
        return status;
 }
@@ -4770,7 +4770,7 @@ static int qlge_probe(struct pci_dev *pdev,
        ndev->irq = pdev->irq;
 
        ndev->netdev_ops = &qlge_netdev_ops;
-       SET_ETHTOOL_OPS(ndev, &qlge_ethtool_ops);
+       ndev->ethtool_ops = &qlge_ethtool_ops;
        ndev->watchdog_timeo = 10 * HZ;
 
        err = register_netdev(ndev);
index aa1c079f231dc6f2cfc017cd4950f9ebb8e1e9da..be425ad5e82487e94a91b7371466f7f93ab558eb 100644 (file)
@@ -7125,7 +7125,7 @@ rtl_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
        for (i = 0; i < ETH_ALEN; i++)
                dev->dev_addr[i] = RTL_R8(MAC0 + i);
 
-       SET_ETHTOOL_OPS(dev, &rtl8169_ethtool_ops);
+       dev->ethtool_ops = &rtl8169_ethtool_ops;
        dev->watchdog_timeo = RTL8169_TX_TIMEOUT;
 
        netif_napi_add(dev, &tp->napi, rtl8169_poll, R8169_NAPI_WEIGHT);
index 6a9509ccd33b29dd84ddcd0b48269f1c24b8da63..6a94ede699b4d016ea00dd31077cbeb719a18c29 100644 (file)
@@ -2627,8 +2627,8 @@ static int sh_mdio_init(struct sh_eth_private *mdp,
                 pdev->name, pdev->id);
 
        /* PHY IRQ */
-       mdp->mii_bus->irq = devm_kzalloc(dev, sizeof(int) * PHY_MAX_ADDR,
-                                        GFP_KERNEL);
+       mdp->mii_bus->irq = devm_kmalloc_array(dev, PHY_MAX_ADDR, sizeof(int),
+                                              GFP_KERNEL);
        if (!mdp->mii_bus->irq) {
                ret = -ENOMEM;
                goto out_free_bus;
@@ -2843,7 +2843,7 @@ static int sh_eth_drv_probe(struct platform_device *pdev)
                ndev->netdev_ops = &sh_eth_netdev_ops_tsu;
        else
                ndev->netdev_ops = &sh_eth_netdev_ops;
-       SET_ETHTOOL_OPS(ndev, &sh_eth_ethtool_ops);
+       ndev->ethtool_ops = &sh_eth_ethtool_ops;
        ndev->watchdog_timeo = TX_TIMEOUT;
 
        /* debug message level */
index 6203c7d8550fda4a530ec91e0a089870e507b4cb..45019649bbbd73227840d866ed92709e22a0d49c 100644 (file)
@@ -358,6 +358,8 @@ struct sxgbe_core_ops {
        /* Enable disable checksum offload operations */
        void (*enable_rx_csum)(void __iomem *ioaddr);
        void (*disable_rx_csum)(void __iomem *ioaddr);
+       void (*enable_rxqueue)(void __iomem *ioaddr, int queue_num);
+       void (*disable_rxqueue)(void __iomem *ioaddr, int queue_num);
 };
 
 const struct sxgbe_core_ops *sxgbe_get_core_ops(void);
index c4da7a2b002a16fa432f0bbe6fbce25be9085acc..58c35692560e599f0977c6460edcd0a616889e5f 100644 (file)
@@ -165,6 +165,26 @@ static void sxgbe_core_set_speed(void __iomem *ioaddr, unsigned char speed)
        writel(tx_cfg, ioaddr + SXGBE_CORE_TX_CONFIG_REG);
 }
 
+static void sxgbe_core_enable_rxqueue(void __iomem *ioaddr, int queue_num)
+{
+       u32 reg_val;
+
+       reg_val = readl(ioaddr + SXGBE_CORE_RX_CTL0_REG);
+       reg_val &= ~(SXGBE_CORE_RXQ_ENABLE_MASK << queue_num);
+       reg_val |= SXGBE_CORE_RXQ_ENABLE;
+       writel(reg_val, ioaddr + SXGBE_CORE_RX_CTL0_REG);
+}
+
+static void sxgbe_core_disable_rxqueue(void __iomem *ioaddr, int queue_num)
+{
+       u32 reg_val;
+
+       reg_val = readl(ioaddr + SXGBE_CORE_RX_CTL0_REG);
+       reg_val &= ~(SXGBE_CORE_RXQ_ENABLE_MASK << queue_num);
+       reg_val |= SXGBE_CORE_RXQ_DISABLE;
+       writel(reg_val, ioaddr + SXGBE_CORE_RX_CTL0_REG);
+}
+
 static void  sxgbe_set_eee_mode(void __iomem *ioaddr)
 {
        u32 ctrl;
@@ -254,6 +274,8 @@ static const struct sxgbe_core_ops core_ops = {
        .set_eee_pls            = sxgbe_set_eee_pls,
        .enable_rx_csum         = sxgbe_enable_rx_csum,
        .disable_rx_csum        = sxgbe_disable_rx_csum,
+       .enable_rxqueue         = sxgbe_core_enable_rxqueue,
+       .disable_rxqueue        = sxgbe_core_disable_rxqueue,
 };
 
 const struct sxgbe_core_ops *sxgbe_get_core_ops(void)
index e896dbbd2e156514eaf1d83ed8e132fbc3d88e37..2686bb5b6765680a8e18eeabd89146384da1c6f5 100644 (file)
@@ -45,10 +45,10 @@ static void sxgbe_prepare_tx_desc(struct sxgbe_tx_norm_desc *p, u8 is_fd,
        p->tdes23.tx_rd_des23.first_desc = is_fd;
        p->tdes23.tx_rd_des23.buf1_size = buf1_len;
 
-       p->tdes23.tx_rd_des23.tx_pkt_len.cksum_pktlen.total_pkt_len = pkt_len;
+       p->tdes23.tx_rd_des23.tx_pkt_len.pkt_len.total_pkt_len = pkt_len;
 
        if (cksum)
-               p->tdes23.tx_rd_des23.tx_pkt_len.cksum_pktlen.cksum_ctl = cic_full;
+               p->tdes23.tx_rd_des23.cksum_ctl = cic_full;
 }
 
 /* Set VLAN control information */
@@ -233,6 +233,12 @@ static void sxgbe_set_rx_owner(struct sxgbe_rx_norm_desc *p)
        p->rdes23.rx_rd_des23.own_bit = 1;
 }
 
+/* Set Interrupt on completion bit */
+static void sxgbe_set_rx_int_on_com(struct sxgbe_rx_norm_desc *p)
+{
+       p->rdes23.rx_rd_des23.int_on_com = 1;
+}
+
 /* Get the receive frame size */
 static int sxgbe_get_rx_frame_len(struct sxgbe_rx_norm_desc *p)
 {
@@ -498,6 +504,7 @@ static const struct sxgbe_desc_ops desc_ops = {
        .init_rx_desc                   = sxgbe_init_rx_desc,
        .get_rx_owner                   = sxgbe_get_rx_owner,
        .set_rx_owner                   = sxgbe_set_rx_owner,
+       .set_rx_int_on_com              = sxgbe_set_rx_int_on_com,
        .get_rx_frame_len               = sxgbe_get_rx_frame_len,
        .get_rx_fd_status               = sxgbe_get_rx_fd_status,
        .get_rx_ld_status               = sxgbe_get_rx_ld_status,
index 838cb9fb0ea979514bafc7b068cd4c09f3ee5d49..18609324db723dc2fbf5c50880d876ba83deb3d3 100644 (file)
@@ -39,22 +39,22 @@ struct sxgbe_tx_norm_desc {
                        u32 int_on_com:1;
                        /* TDES3 */
                        union {
-                               u32 tcp_payload_len:18;
+                               u16 tcp_payload_len;
                                struct {
                                        u32 total_pkt_len:15;
                                        u32 reserved1:1;
-                                       u32 cksum_ctl:2;
-                               } cksum_pktlen;
+                               } pkt_len;
                        } tx_pkt_len;
 
-                       u32 tse_bit:1;
-                       u32 tcp_hdr_len:4;
-                       u32 sa_insert_ctl:3;
-                       u32 crc_pad_ctl:2;
-                       u32 last_desc:1;
-                       u32 first_desc:1;
-                       u32 ctxt_bit:1;
-                       u32 own_bit:1;
+                       u16 cksum_ctl:2;
+                       u16 tse_bit:1;
+                       u16 tcp_hdr_len:4;
+                       u16 sa_insert_ctl:3;
+                       u16 crc_pad_ctl:2;
+                       u16 last_desc:1;
+                       u16 first_desc:1;
+                       u16 ctxt_bit:1;
+                       u16 own_bit:1;
                } tx_rd_des23;
 
                /* tx write back Desc 2,3 */
@@ -70,25 +70,20 @@ struct sxgbe_tx_norm_desc {
 
 struct sxgbe_rx_norm_desc {
        union {
-               u32 rdes0; /* buf1 address */
-               struct {
+               u64 rdes01; /* buf1 address */
+               union {
                        u32 out_vlan_tag:16;
                        u32 in_vlan_tag:16;
-               } wb_rx_des0;
-       } rd_wb_des0;
-
-       union {
-               u32 rdes1;      /* buf2 address or buf1[63:32] */
-               u32 rss_hash;   /* Write-back RX */
-       } rd_wb_des1;
+                       u32 rss_hash;
+               } rx_wb_des01;
+       } rdes01;
 
        union {
                /* RX Read format Desc 2,3 */
                struct{
                        /* RDES2 */
-                       u32 buf2_addr;
+                       u64 buf2_addr:62;
                        /* RDES3 */
-                       u32 buf2_hi_addr:30;
                        u32 int_on_com:1;
                        u32 own_bit:1;
                } rx_rd_des23;
@@ -263,6 +258,9 @@ struct sxgbe_desc_ops {
        /* Set own bit */
        void (*set_rx_owner)(struct sxgbe_rx_norm_desc *p);
 
+       /* Set Interrupt on completion bit */
+       void (*set_rx_int_on_com)(struct sxgbe_rx_norm_desc *p);
+
        /* Get the receive frame size */
        int (*get_rx_frame_len)(struct sxgbe_rx_norm_desc *p);
 
index 4d989ff6c978a8ad67d36afbb7d2df5ef6632929..bb9b5b8afc5f4417bae05c4ef0e1ea02f84b7421 100644 (file)
 /* DMA core initialization */
 static int sxgbe_dma_init(void __iomem *ioaddr, int fix_burst, int burst_map)
 {
-       int retry_count = 10;
        u32 reg_val;
 
-       /* reset the DMA */
-       writel(SXGBE_DMA_SOFT_RESET, ioaddr + SXGBE_DMA_MODE_REG);
-       while (retry_count--) {
-               if (!(readl(ioaddr + SXGBE_DMA_MODE_REG) &
-                     SXGBE_DMA_SOFT_RESET))
-                       break;
-               mdelay(10);
-       }
-
-       if (retry_count < 0)
-               return -EBUSY;
-
        reg_val = readl(ioaddr + SXGBE_DMA_SYSBUS_MODE_REG);
 
        /* if fix_burst = 0, Set UNDEF = 1 of DMA_Sys_Mode Register.
index 0415fa50eeb77b82376214708419f6e4186e876f..c0981ae45874acc5c247a2b37d84a90296e1196b 100644 (file)
@@ -520,5 +520,5 @@ static const struct ethtool_ops sxgbe_ethtool_ops = {
 
 void sxgbe_set_ethtool_ops(struct net_device *netdev)
 {
-       SET_ETHTOOL_OPS(netdev, &sxgbe_ethtool_ops);
+       netdev->ethtool_ops = &sxgbe_ethtool_ops;
 }
index 137f366ec7e4bc9b1775ee6b4854b5ef09a37e2d..698494481d18072ca00ddecb825ea92bd4e86c56 100644 (file)
@@ -1076,6 +1076,9 @@ static int sxgbe_open(struct net_device *dev)
 
        /* Initialize the MAC Core */
        priv->hw->mac->core_init(priv->ioaddr);
+       SXGBE_FOR_EACH_QUEUE(SXGBE_RX_QUEUES, queue_num) {
+               priv->hw->mac->enable_rxqueue(priv->ioaddr, queue_num);
+       }
 
        /* Request the IRQ lines */
        ret = devm_request_irq(priv->device, priv->irq, sxgbe_common_interrupt,
@@ -1452,6 +1455,7 @@ static void sxgbe_rx_refill(struct sxgbe_priv_data *priv)
                /* Added memory barrier for RX descriptor modification */
                wmb();
                priv->hw->desc->set_rx_owner(p);
+               priv->hw->desc->set_rx_int_on_com(p);
                /* Added memory barrier for RX descriptor modification */
                wmb();
        }
@@ -2034,6 +2038,24 @@ static int sxgbe_hw_init(struct sxgbe_priv_data * const priv)
        return 0;
 }
 
+static int sxgbe_sw_reset(void __iomem *addr)
+{
+       int retry_count = 10;
+
+       writel(SXGBE_DMA_SOFT_RESET, addr + SXGBE_DMA_MODE_REG);
+       while (retry_count--) {
+               if (!(readl(addr + SXGBE_DMA_MODE_REG) &
+                     SXGBE_DMA_SOFT_RESET))
+                       break;
+               mdelay(10);
+       }
+
+       if (retry_count < 0)
+               return -EBUSY;
+
+       return 0;
+}
+
 /**
  * sxgbe_drv_probe
  * @device: device pointer
@@ -2066,6 +2088,10 @@ struct sxgbe_priv_data *sxgbe_drv_probe(struct device *device,
        priv->plat = plat_dat;
        priv->ioaddr = addr;
 
+       ret = sxgbe_sw_reset(priv->ioaddr);
+       if (ret)
+               goto error_free_netdev;
+
        /* Verify driver arguments */
        sxgbe_verify_args();
 
@@ -2182,9 +2208,14 @@ error_free_netdev:
 int sxgbe_drv_remove(struct net_device *ndev)
 {
        struct sxgbe_priv_data *priv = netdev_priv(ndev);
+       u8 queue_num;
 
        netdev_info(ndev, "%s: removing driver\n", __func__);
 
+       SXGBE_FOR_EACH_QUEUE(SXGBE_RX_QUEUES, queue_num) {
+               priv->hw->mac->disable_rxqueue(priv->ioaddr, queue_num);
+       }
+
        priv->hw->dma->stop_rx(priv->ioaddr, SXGBE_RX_QUEUES);
        priv->hw->dma->stop_tx(priv->ioaddr, SXGBE_TX_QUEUES);
 
index 01af2cbb479d10a96c1038bfc3f2ff167a20beb3..43ccb4a6de15a3fe2dedc9198a3866fd89143f47 100644 (file)
@@ -27,7 +27,7 @@
 #define SXGBE_SMA_PREAD_CMD    0x02 /* post read  increament address */
 #define SXGBE_SMA_READ_CMD     0x03 /* read command */
 #define SXGBE_SMA_SKIP_ADDRFRM 0x00040000 /* skip the address frame */
-#define SXGBE_MII_BUSY         0x00800000 /* mii busy */
+#define SXGBE_MII_BUSY         0x00400000 /* mii busy */
 
 static int sxgbe_mdio_busy_wait(void __iomem *ioaddr, unsigned int mii_data)
 {
@@ -147,6 +147,7 @@ int sxgbe_mdio_register(struct net_device *ndev)
        struct sxgbe_mdio_bus_data *mdio_data = priv->plat->mdio_bus_data;
        int err, phy_addr;
        int *irqlist;
+       bool phy_found = false;
        bool act;
 
        /* allocate the new mdio bus */
@@ -162,7 +163,7 @@ int sxgbe_mdio_register(struct net_device *ndev)
                irqlist = priv->mii_irq;
 
        /* assign mii bus fields */
-       mdio_bus->name = "samsxgbe";
+       mdio_bus->name = "sxgbe";
        mdio_bus->read = &sxgbe_mdio_read;
        mdio_bus->write = &sxgbe_mdio_write;
        snprintf(mdio_bus->id, MII_BUS_ID_SIZE, "%s-%x",
@@ -216,13 +217,22 @@ int sxgbe_mdio_register(struct net_device *ndev)
                        netdev_info(ndev, "PHY ID %08x at %d IRQ %s (%s)%s\n",
                                    phy->phy_id, phy_addr, irq_str,
                                    dev_name(&phy->dev), act ? " active" : "");
+                       phy_found = true;
                }
        }
 
+       if (!phy_found) {
+               netdev_err(ndev, "PHY not found\n");
+               goto phyfound_err;
+       }
+
        priv->mii = mdio_bus;
 
        return 0;
 
+phyfound_err:
+       err = -ENODEV;
+       mdiobus_unregister(mdio_bus);
 mdiobus_err:
        mdiobus_free(mdio_bus);
        return err;
index 5a89acb4c505fc83f3847a10c437c961db079dec..56f8bf5a3f1b99564a2b055830810fe3d156e6c4 100644 (file)
 #define SXGBE_CORE_RX_CTL2_REG         0x00A8
 #define SXGBE_CORE_RX_CTL3_REG         0x00AC
 
+#define SXGBE_CORE_RXQ_ENABLE_MASK     0x0003
+#define SXGBE_CORE_RXQ_ENABLE          0x0002
+#define SXGBE_CORE_RXQ_DISABLE         0x0000
+
 /* Interrupt Registers */
 #define SXGBE_CORE_INT_STATUS_REG      0x00B0
 #define SXGBE_CORE_INT_ENABLE_REG      0x00B4
index 63d595fd3cc5f5a9df298dfdd2583abcec9a3a03..1e274045970fa011c6dacb6baeb77db95178b164 100644 (file)
@@ -2248,7 +2248,7 @@ static int efx_register_netdev(struct efx_nic *efx)
        } else {
                net_dev->netdev_ops = &efx_farch_netdev_ops;
        }
-       SET_ETHTOOL_OPS(net_dev, &efx_ethtool_ops);
+       net_dev->ethtool_ops = &efx_ethtool_ops;
        net_dev->gso_max_segs = EFX_TSO_MAX_SEGS;
 
        rtnl_lock();
index acbbe48a519c0c673ff8de55c31b1690c3976cb5..a86339903b9b0662a835df431f2b79d0677aa93f 100644 (file)
@@ -1877,7 +1877,7 @@ static int sis190_init_one(struct pci_dev *pdev,
 
        dev->netdev_ops = &sis190_netdev_ops;
 
-       SET_ETHTOOL_OPS(dev, &sis190_ethtool_ops);
+       dev->ethtool_ops = &sis190_ethtool_ops;
        dev->watchdog_timeo = SIS190_TX_TIMEOUT;
 
        spin_lock_init(&tp->lock);
index c7a4868571f9f81f1607dc50225dec8bdaf6f017..6b33127ab352a43ed6a787af7eedde554241e1b3 100644 (file)
@@ -318,7 +318,7 @@ static int smc91c92_probe(struct pcmcia_device *link)
 
     /* The SMC91c92-specific entries in the device structure. */
     dev->netdev_ops = &smc_netdev_ops;
-    SET_ETHTOOL_OPS(dev, &ethtool_ops);
+    dev->ethtool_ops = &ethtool_ops;
     dev->watchdog_timeo = TX_TIMEOUT;
 
     smc->mii_if.dev = dev;
index d1b4dca53a9d10be97f05e2e09dd08418598bf05..bcaa41af1e628e9f8d2e84fccc7a38ea716e428c 100644 (file)
@@ -147,18 +147,19 @@ MODULE_ALIAS("platform:smc91x");
  */
 #define MII_DELAY              1
 
-#if SMC_DEBUG > 0
-#define DBG(n, dev, args...)                           \
-       do {                                            \
-               if (SMC_DEBUG >= (n))                   \
-                       netdev_dbg(dev, args);          \
+#define DBG(n, dev, fmt, ...)                                  \
+       do {                                                    \
+               if (SMC_DEBUG >= (n))                           \
+                       netdev_dbg(dev, fmt, ##__VA_ARGS__);    \
        } while (0)
 
-#define PRINTK(dev, args...)   netdev_info(dev, args)
-#else
-#define DBG(n, dev, args...)   do { } while (0)
-#define PRINTK(dev, args...)   netdev_dbg(dev, args)
-#endif
+#define PRINTK(dev, fmt, ...)                                  \
+       do {                                                    \
+               if (SMC_DEBUG > 0)                              \
+                       netdev_info(dev, fmt, ##__VA_ARGS__);   \
+               else                                            \
+                       netdev_dbg(dev, fmt, ##__VA_ARGS__);    \
+       } while (0)
 
 #if SMC_DEBUG > 3
 static void PRINT_PKT(u_char *buf, int length)
@@ -191,7 +192,7 @@ static void PRINT_PKT(u_char *buf, int length)
        pr_cont("\n");
 }
 #else
-#define PRINT_PKT(x...)  do { } while (0)
+static inline void PRINT_PKT(u_char *buf, int length) { }
 #endif
 
 
@@ -1781,7 +1782,7 @@ static int smc_findirq(struct smc_local *lp)
        int timeout = 20;
        unsigned long cookie;
 
-       DBG(2, dev, "%s: %s\n", CARDNAME, __func__);
+       DBG(2, lp->dev, "%s: %s\n", CARDNAME, __func__);
 
        cookie = probe_irq_on();
 
index c5f9cb85c8ef9c31c2ce05894faffa2a484a8093..c963394ded6c569fd06bb2cba90f2cdd6c009173 100644 (file)
@@ -784,5 +784,5 @@ static const struct ethtool_ops stmmac_ethtool_ops = {
 
 void stmmac_set_ethtool_ops(struct net_device *netdev)
 {
-       SET_ETHTOOL_OPS(netdev, &stmmac_ethtool_ops);
+       netdev->ethtool_ops = &stmmac_ethtool_ops;
 }
index a468eb10782361e31fd0b0af5ce6be33d68a2578..a5b1e1b776fe3313c5c9852062ed66ea1547c285 100644 (file)
@@ -205,10 +205,13 @@ int stmmac_mdio_register(struct net_device *ndev)
        if (new_bus == NULL)
                return -ENOMEM;
 
-       if (mdio_bus_data->irqs)
+       if (mdio_bus_data->irqs) {
                irqlist = mdio_bus_data->irqs;
-       else
+       } else {
+               for (addr = 0; addr < PHY_MAX_ADDR; addr++)
+                       priv->mii_irq[addr] = PHY_POLL;
                irqlist = priv->mii_irq;
+       }
 
 #ifdef CONFIG_OF
        if (priv->device->of_node)
index df8d383acf48ed0da087bb19d144499484ac0376..b9ac20f42651bd90e33e5fcb3da38db5401117a2 100644 (file)
@@ -246,7 +246,7 @@ static inline void cas_lock_tx(struct cas *cp)
        int i;
 
        for (i = 0; i < N_TX_RINGS; i++)
-               spin_lock(&cp->tx_lock[i]);
+               spin_lock_nested(&cp->tx_lock[i], i);
 }
 
 static inline void cas_lock_all(struct cas *cp)
index 2ead87759ab411819be6cb03fa8de3cef8ea2a1f..38da73a2a886b52b3753d3b9dd31f214d345e4fd 100644 (file)
@@ -2413,7 +2413,7 @@ static void bdx_set_ethtool_ops(struct net_device *netdev)
                .get_ethtool_stats = bdx_get_ethtool_stats,
        };
 
-       SET_ETHTOOL_OPS(netdev, &bdx_ethtool_ops);
+       netdev->ethtool_ops = &bdx_ethtool_ops;
 }
 
 /**
index 148da9ae83666ce7cd2284c1cc75f18e9957f15f..aa8bf45e53dc9b0ddab4265b2e527067339a00fc 100644 (file)
@@ -29,6 +29,8 @@
 #define AM33XX_GMII_SEL_RMII2_IO_CLK_EN        BIT(7)
 #define AM33XX_GMII_SEL_RMII1_IO_CLK_EN        BIT(6)
 
+#define GMII_SEL_MODE_MASK             0x3
+
 struct cpsw_phy_sel_priv {
        struct device   *dev;
        u32 __iomem     *gmii_sel;
@@ -65,7 +67,7 @@ static void cpsw_gmii_sel_am3352(struct cpsw_phy_sel_priv *priv,
                break;
        };
 
-       mask = 0x3 << (slave * 2) | BIT(slave + 6);
+       mask = GMII_SEL_MODE_MASK << (slave * 2) | BIT(slave + 6);
        mode <<= slave * 2;
 
        if (priv->rmii_clock_external) {
@@ -81,6 +83,55 @@ static void cpsw_gmii_sel_am3352(struct cpsw_phy_sel_priv *priv,
        writel(reg, priv->gmii_sel);
 }
 
+static void cpsw_gmii_sel_dra7xx(struct cpsw_phy_sel_priv *priv,
+                                phy_interface_t phy_mode, int slave)
+{
+       u32 reg;
+       u32 mask;
+       u32 mode = 0;
+
+       reg = readl(priv->gmii_sel);
+
+       switch (phy_mode) {
+       case PHY_INTERFACE_MODE_RMII:
+               mode = AM33XX_GMII_SEL_MODE_RMII;
+               break;
+
+       case PHY_INTERFACE_MODE_RGMII:
+       case PHY_INTERFACE_MODE_RGMII_ID:
+       case PHY_INTERFACE_MODE_RGMII_RXID:
+       case PHY_INTERFACE_MODE_RGMII_TXID:
+               mode = AM33XX_GMII_SEL_MODE_RGMII;
+               break;
+
+       case PHY_INTERFACE_MODE_MII:
+       default:
+               mode = AM33XX_GMII_SEL_MODE_MII;
+               break;
+       };
+
+       switch (slave) {
+       case 0:
+               mask = GMII_SEL_MODE_MASK;
+               break;
+       case 1:
+               mask = GMII_SEL_MODE_MASK << 4;
+               mode <<= 4;
+               break;
+       default:
+               dev_err(priv->dev, "invalid slave number...\n");
+               return;
+       }
+
+       if (priv->rmii_clock_external)
+               dev_err(priv->dev, "RMII External clock is not supported\n");
+
+       reg &= ~mask;
+       reg |= mode;
+
+       writel(reg, priv->gmii_sel);
+}
+
 static struct platform_driver cpsw_phy_sel_driver;
 static int match(struct device *dev, void *data)
 {
@@ -112,6 +163,14 @@ static const struct of_device_id cpsw_phy_sel_id_table[] = {
                .compatible     = "ti,am3352-cpsw-phy-sel",
                .data           = &cpsw_gmii_sel_am3352,
        },
+       {
+               .compatible     = "ti,dra7xx-cpsw-phy-sel",
+               .data           = &cpsw_gmii_sel_dra7xx,
+       },
+       {
+               .compatible     = "ti,am43xx-cpsw-phy-sel",
+               .data           = &cpsw_gmii_sel_am3352,
+       },
        {}
 };
 MODULE_DEVICE_TABLE(of, cpsw_phy_sel_id_table);
@@ -132,6 +191,7 @@ static int cpsw_phy_sel_probe(struct platform_device *pdev)
                return -ENOMEM;
        }
 
+       priv->dev = &pdev->dev;
        priv->cpsw_phy_sel = of_id->data;
 
        res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "gmii-sel");
index 36aa109416c4c3a387a440795a8d3611cf3d4ded..ff380dac6629d16f4d3642f9ed6789ec2da7f2ab 100644 (file)
@@ -143,13 +143,13 @@ do {                                                              \
                u32 i;          \
                for (i = 0; i < priv->num_irqs; i++) \
                        enable_irq(priv->irqs_table[i]); \
-       } while (0);
+       } while (0)
 #define cpsw_disable_irq(priv) \
        do {                    \
                u32 i;          \
                for (i = 0; i < priv->num_irqs; i++) \
                        disable_irq_nosync(priv->irqs_table[i]); \
-       } while (0);
+       } while (0)
 
 #define cpsw_slave_index(priv)                         \
                ((priv->data.dual_emac) ? priv->emac_port :     \
@@ -248,20 +248,31 @@ struct cpsw_ss_regs {
 #define TS_131              (1<<11) /* Time Sync Dest IP Addr 131 enable */
 #define TS_130              (1<<10) /* Time Sync Dest IP Addr 130 enable */
 #define TS_129              (1<<9)  /* Time Sync Dest IP Addr 129 enable */
-#define TS_BIT8             (1<<8)  /* ts_ttl_nonzero? */
+#define TS_TTL_NONZERO      (1<<8)  /* Time Sync Time To Live Non-zero enable */
+#define TS_ANNEX_F_EN       (1<<6)  /* Time Sync Annex F enable */
 #define TS_ANNEX_D_EN       (1<<4)  /* Time Sync Annex D enable */
 #define TS_LTYPE2_EN        (1<<3)  /* Time Sync LTYPE 2 enable */
 #define TS_LTYPE1_EN        (1<<2)  /* Time Sync LTYPE 1 enable */
 #define TS_TX_EN            (1<<1)  /* Time Sync Transmit Enable */
 #define TS_RX_EN            (1<<0)  /* Time Sync Receive Enable */
 
-#define CTRL_TS_BITS \
-       (TS_320 | TS_319 | TS_132 | TS_131 | TS_130 | TS_129 | TS_BIT8 | \
-        TS_ANNEX_D_EN | TS_LTYPE1_EN)
+#define CTRL_V2_TS_BITS \
+       (TS_320 | TS_319 | TS_132 | TS_131 | TS_130 | TS_129 |\
+        TS_TTL_NONZERO  | TS_ANNEX_D_EN | TS_LTYPE1_EN)
+
+#define CTRL_V2_ALL_TS_MASK (CTRL_V2_TS_BITS | TS_TX_EN | TS_RX_EN)
+#define CTRL_V2_TX_TS_BITS  (CTRL_V2_TS_BITS | TS_TX_EN)
+#define CTRL_V2_RX_TS_BITS  (CTRL_V2_TS_BITS | TS_RX_EN)
+
+
+#define CTRL_V3_TS_BITS \
+       (TS_320 | TS_319 | TS_132 | TS_131 | TS_130 | TS_129 |\
+        TS_TTL_NONZERO | TS_ANNEX_F_EN | TS_ANNEX_D_EN |\
+        TS_LTYPE1_EN)
 
-#define CTRL_ALL_TS_MASK (CTRL_TS_BITS | TS_TX_EN | TS_RX_EN)
-#define CTRL_TX_TS_BITS  (CTRL_TS_BITS | TS_TX_EN)
-#define CTRL_RX_TS_BITS  (CTRL_TS_BITS | TS_RX_EN)
+#define CTRL_V3_ALL_TS_MASK (CTRL_V3_TS_BITS | TS_TX_EN | TS_RX_EN)
+#define CTRL_V3_TX_TS_BITS  (CTRL_V3_TS_BITS | TS_TX_EN)
+#define CTRL_V3_RX_TS_BITS  (CTRL_V3_TS_BITS | TS_RX_EN)
 
 /* Bit definitions for the CPSW2_TS_SEQ_MTYPE register */
 #define TS_SEQ_ID_OFFSET_SHIFT   (16)    /* Time Sync Sequence ID Offset */
@@ -1376,13 +1387,27 @@ static void cpsw_hwtstamp_v2(struct cpsw_priv *priv)
                slave = &priv->slaves[priv->data.active_slave];
 
        ctrl = slave_read(slave, CPSW2_CONTROL);
-       ctrl &= ~CTRL_ALL_TS_MASK;
+       switch (priv->version) {
+       case CPSW_VERSION_2:
+               ctrl &= ~CTRL_V2_ALL_TS_MASK;
 
-       if (priv->cpts->tx_enable)
-               ctrl |= CTRL_TX_TS_BITS;
+               if (priv->cpts->tx_enable)
+                       ctrl |= CTRL_V2_TX_TS_BITS;
 
-       if (priv->cpts->rx_enable)
-               ctrl |= CTRL_RX_TS_BITS;
+               if (priv->cpts->rx_enable)
+                       ctrl |= CTRL_V2_RX_TS_BITS;
+       break;
+       case CPSW_VERSION_3:
+       default:
+               ctrl &= ~CTRL_V3_ALL_TS_MASK;
+
+               if (priv->cpts->tx_enable)
+                       ctrl |= CTRL_V3_TX_TS_BITS;
+
+               if (priv->cpts->rx_enable)
+                       ctrl |= CTRL_V3_RX_TS_BITS;
+       break;
+       }
 
        mtype = (30 << TS_SEQ_ID_OFFSET_SHIFT) | EVENT_MSG_BITS;
 
@@ -1398,7 +1423,8 @@ static int cpsw_hwtstamp_set(struct net_device *dev, struct ifreq *ifr)
        struct hwtstamp_config cfg;
 
        if (priv->version != CPSW_VERSION_1 &&
-           priv->version != CPSW_VERSION_2)
+           priv->version != CPSW_VERSION_2 &&
+           priv->version != CPSW_VERSION_3)
                return -EOPNOTSUPP;
 
        if (copy_from_user(&cfg, ifr->ifr_data, sizeof(cfg)))
@@ -1443,6 +1469,7 @@ static int cpsw_hwtstamp_set(struct net_device *dev, struct ifreq *ifr)
                cpsw_hwtstamp_v1(priv);
                break;
        case CPSW_VERSION_2:
+       case CPSW_VERSION_3:
                cpsw_hwtstamp_v2(priv);
                break;
        default:
@@ -1459,7 +1486,8 @@ static int cpsw_hwtstamp_get(struct net_device *dev, struct ifreq *ifr)
        struct hwtstamp_config cfg;
 
        if (priv->version != CPSW_VERSION_1 &&
-           priv->version != CPSW_VERSION_2)
+           priv->version != CPSW_VERSION_2 &&
+           priv->version != CPSW_VERSION_3)
                return -EOPNOTSUPP;
 
        cfg.flags = 0;
@@ -1780,25 +1808,25 @@ static int cpsw_probe_dt(struct cpsw_platform_data *data,
                return -EINVAL;
 
        if (of_property_read_u32(node, "slaves", &prop)) {
-               pr_err("Missing slaves property in the DT.\n");
+               dev_err(&pdev->dev, "Missing slaves property in the DT.\n");
                return -EINVAL;
        }
        data->slaves = prop;
 
        if (of_property_read_u32(node, "active_slave", &prop)) {
-               pr_err("Missing active_slave property in the DT.\n");
+               dev_err(&pdev->dev, "Missing active_slave property in the DT.\n");
                return -EINVAL;
        }
        data->active_slave = prop;
 
        if (of_property_read_u32(node, "cpts_clock_mult", &prop)) {
-               pr_err("Missing cpts_clock_mult property in the DT.\n");
+               dev_err(&pdev->dev, "Missing cpts_clock_mult property in the DT.\n");
                return -EINVAL;
        }
        data->cpts_clock_mult = prop;
 
        if (of_property_read_u32(node, "cpts_clock_shift", &prop)) {
-               pr_err("Missing cpts_clock_shift property in the DT.\n");
+               dev_err(&pdev->dev, "Missing cpts_clock_shift property in the DT.\n");
                return -EINVAL;
        }
        data->cpts_clock_shift = prop;
@@ -1810,31 +1838,31 @@ static int cpsw_probe_dt(struct cpsw_platform_data *data,
                return -ENOMEM;
 
        if (of_property_read_u32(node, "cpdma_channels", &prop)) {
-               pr_err("Missing cpdma_channels property in the DT.\n");
+               dev_err(&pdev->dev, "Missing cpdma_channels property in the DT.\n");
                return -EINVAL;
        }
        data->channels = prop;
 
        if (of_property_read_u32(node, "ale_entries", &prop)) {
-               pr_err("Missing ale_entries property in the DT.\n");
+               dev_err(&pdev->dev, "Missing ale_entries property in the DT.\n");
                return -EINVAL;
        }
        data->ale_entries = prop;
 
        if (of_property_read_u32(node, "bd_ram_size", &prop)) {
-               pr_err("Missing bd_ram_size property in the DT.\n");
+               dev_err(&pdev->dev, "Missing bd_ram_size property in the DT.\n");
                return -EINVAL;
        }
        data->bd_ram_size = prop;
 
        if (of_property_read_u32(node, "rx_descs", &prop)) {
-               pr_err("Missing rx_descs property in the DT.\n");
+               dev_err(&pdev->dev, "Missing rx_descs property in the DT.\n");
                return -EINVAL;
        }
        data->rx_descs = prop;
 
        if (of_property_read_u32(node, "mac_control", &prop)) {
-               pr_err("Missing mac_control property in the DT.\n");
+               dev_err(&pdev->dev, "Missing mac_control property in the DT.\n");
                return -EINVAL;
        }
        data->mac_control = prop;
@@ -1848,7 +1876,7 @@ static int cpsw_probe_dt(struct cpsw_platform_data *data,
        ret = of_platform_populate(node, NULL, NULL, &pdev->dev);
        /* We do not want to force this, as in some cases may not have child */
        if (ret)
-               pr_warn("Doesn't have any child node\n");
+               dev_warn(&pdev->dev, "Doesn't have any child node\n");
 
        for_each_child_of_node(node, slave_node) {
                struct cpsw_slave_data *slave_data = data->slave_data + i;
@@ -1865,24 +1893,19 @@ static int cpsw_probe_dt(struct cpsw_platform_data *data,
 
                parp = of_get_property(slave_node, "phy_id", &lenp);
                if ((parp == NULL) || (lenp != (sizeof(void *) * 2))) {
-                       pr_err("Missing slave[%d] phy_id property\n", i);
+                       dev_err(&pdev->dev, "Missing slave[%d] phy_id property\n", i);
                        return -EINVAL;
                }
                mdio_node = of_find_node_by_phandle(be32_to_cpup(parp));
                phyid = be32_to_cpup(parp+1);
                mdio = of_find_device_by_node(mdio_node);
-
-               if (strncmp(mdio->name, "gpio", 4) == 0) {
-                       /* GPIO bitbang MDIO driver attached */
-                       struct mii_bus *bus = dev_get_drvdata(&mdio->dev);
-
-                       snprintf(slave_data->phy_id, sizeof(slave_data->phy_id),
-                                PHY_ID_FMT, bus->id, phyid);
-               } else {
-                       /* davinci MDIO driver attached */
-                       snprintf(slave_data->phy_id, sizeof(slave_data->phy_id),
-                                PHY_ID_FMT, mdio->name, phyid);
+               of_node_put(mdio_node);
+               if (!mdio) {
+                       pr_err("Missing mdio platform device\n");
+                       return -EINVAL;
                }
+               snprintf(slave_data->phy_id, sizeof(slave_data->phy_id),
+                        PHY_ID_FMT, mdio->name, phyid);
 
                mac_addr = of_get_mac_address(slave_node);
                if (mac_addr)
@@ -1890,18 +1913,18 @@ static int cpsw_probe_dt(struct cpsw_platform_data *data,
 
                slave_data->phy_if = of_get_phy_mode(slave_node);
                if (slave_data->phy_if < 0) {
-                       pr_err("Missing or malformed slave[%d] phy-mode property\n",
-                              i);
+                       dev_err(&pdev->dev, "Missing or malformed slave[%d] phy-mode property\n",
+                               i);
                        return slave_data->phy_if;
                }
 
                if (data->dual_emac) {
                        if (of_property_read_u32(slave_node, "dual_emac_res_vlan",
                                                 &prop)) {
-                               pr_err("Missing dual_emac_res_vlan in DT.\n");
+                               dev_err(&pdev->dev, "Missing dual_emac_res_vlan in DT.\n");
                                slave_data->dual_emac_res_vlan = i+1;
-                               pr_err("Using %d as Reserved VLAN for %d slave\n",
-                                      slave_data->dual_emac_res_vlan, i);
+                               dev_err(&pdev->dev, "Using %d as Reserved VLAN for %d slave\n",
+                                       slave_data->dual_emac_res_vlan, i);
                        } else {
                                slave_data->dual_emac_res_vlan = prop;
                        }
@@ -1925,7 +1948,7 @@ static int cpsw_probe_dual_emac(struct platform_device *pdev,
 
        ndev = alloc_etherdev(sizeof(struct cpsw_priv));
        if (!ndev) {
-               pr_err("cpsw: error allocating net_device\n");
+               dev_err(&pdev->dev, "cpsw: error allocating net_device\n");
                return -ENOMEM;
        }
 
@@ -1941,10 +1964,10 @@ static int cpsw_probe_dual_emac(struct platform_device *pdev,
        if (is_valid_ether_addr(data->slave_data[1].mac_addr)) {
                memcpy(priv_sl2->mac_addr, data->slave_data[1].mac_addr,
                        ETH_ALEN);
-               pr_info("cpsw: Detected MACID = %pM\n", priv_sl2->mac_addr);
+               dev_info(&pdev->dev, "cpsw: Detected MACID = %pM\n", priv_sl2->mac_addr);
        } else {
                random_ether_addr(priv_sl2->mac_addr);
-               pr_info("cpsw: Random MACID = %pM\n", priv_sl2->mac_addr);
+               dev_info(&pdev->dev, "cpsw: Random MACID = %pM\n", priv_sl2->mac_addr);
        }
        memcpy(ndev->dev_addr, priv_sl2->mac_addr, ETH_ALEN);
 
@@ -1975,14 +1998,14 @@ static int cpsw_probe_dual_emac(struct platform_device *pdev,
        ndev->features |= NETIF_F_HW_VLAN_CTAG_FILTER;
 
        ndev->netdev_ops = &cpsw_netdev_ops;
-       SET_ETHTOOL_OPS(ndev, &cpsw_ethtool_ops);
+       ndev->ethtool_ops = &cpsw_ethtool_ops;
        netif_napi_add(ndev, &priv_sl2->napi, cpsw_poll, CPSW_POLL_WEIGHT);
 
        /* register the network device */
        SET_NETDEV_DEV(ndev, &pdev->dev);
        ret = register_netdev(ndev);
        if (ret) {
-               pr_err("cpsw: error registering net device\n");
+               dev_err(&pdev->dev, "cpsw: error registering net device\n");
                free_netdev(ndev);
                ret = -ENODEV;
        }
@@ -2004,7 +2027,7 @@ static int cpsw_probe(struct platform_device *pdev)
 
        ndev = alloc_etherdev(sizeof(struct cpsw_priv));
        if (!ndev) {
-               pr_err("error allocating net_device\n");
+               dev_err(&pdev->dev, "error allocating net_device\n");
                return -ENOMEM;
        }
 
@@ -2019,7 +2042,7 @@ static int cpsw_probe(struct platform_device *pdev)
        priv->cpts = devm_kzalloc(&pdev->dev, sizeof(struct cpts), GFP_KERNEL);
        priv->irq_enabled = true;
        if (!priv->cpts) {
-               pr_err("error allocating cpts\n");
+               dev_err(&pdev->dev, "error allocating cpts\n");
                goto clean_ndev_ret;
        }
 
@@ -2032,7 +2055,7 @@ static int cpsw_probe(struct platform_device *pdev)
        pinctrl_pm_select_default_state(&pdev->dev);
 
        if (cpsw_probe_dt(&priv->data, pdev)) {
-               pr_err("cpsw: platform data missing\n");
+               dev_err(&pdev->dev, "cpsw: platform data missing\n");
                ret = -ENODEV;
                goto clean_runtime_disable_ret;
        }
@@ -2040,10 +2063,10 @@ static int cpsw_probe(struct platform_device *pdev)
 
        if (is_valid_ether_addr(data->slave_data[0].mac_addr)) {
                memcpy(priv->mac_addr, data->slave_data[0].mac_addr, ETH_ALEN);
-               pr_info("Detected MACID = %pM\n", priv->mac_addr);
+               dev_info(&pdev->dev, "Detected MACID = %pM\n", priv->mac_addr);
        } else {
                eth_random_addr(priv->mac_addr);
-               pr_info("Random MACID = %pM\n", priv->mac_addr);
+               dev_info(&pdev->dev, "Random MACID = %pM\n", priv->mac_addr);
        }
 
        memcpy(ndev->dev_addr, priv->mac_addr, ETH_ALEN);
@@ -2204,7 +2227,7 @@ static int cpsw_probe(struct platform_device *pdev)
        ndev->features |= NETIF_F_HW_VLAN_CTAG_FILTER;
 
        ndev->netdev_ops = &cpsw_netdev_ops;
-       SET_ETHTOOL_OPS(ndev, &cpsw_ethtool_ops);
+       ndev->ethtool_ops = &cpsw_ethtool_ops;
        netif_napi_add(ndev, &priv->napi, cpsw_poll, CPSW_POLL_WEIGHT);
 
        /* register the network device */
index 243513980b51511a9c3054d71c8a270d63949cb1..6b56f85951e581826afc152109d0eee4b53dd08d 100644 (file)
@@ -236,13 +236,11 @@ static void cpts_overflow_check(struct work_struct *work)
        schedule_delayed_work(&cpts->overflow_work, CPTS_OVERFLOW_PERIOD);
 }
 
-#define CPTS_REF_CLOCK_NAME "cpsw_cpts_rft_clk"
-
-static void cpts_clk_init(struct cpts *cpts)
+static void cpts_clk_init(struct device *dev, struct cpts *cpts)
 {
-       cpts->refclk = clk_get(NULL, CPTS_REF_CLOCK_NAME);
+       cpts->refclk = devm_clk_get(dev, "cpts");
        if (IS_ERR(cpts->refclk)) {
-               pr_err("Failed to clk_get %s\n", CPTS_REF_CLOCK_NAME);
+               dev_err(dev, "Failed to get cpts refclk\n");
                cpts->refclk = NULL;
                return;
        }
@@ -252,7 +250,6 @@ static void cpts_clk_init(struct cpts *cpts)
 static void cpts_clk_release(struct cpts *cpts)
 {
        clk_disable(cpts->refclk);
-       clk_put(cpts->refclk);
 }
 
 static int cpts_match(struct sk_buff *skb, unsigned int ptp_class,
@@ -390,7 +387,7 @@ int cpts_register(struct device *dev, struct cpts *cpts,
        for (i = 0; i < CPTS_MAX_EVENTS; i++)
                list_add(&cpts->pool_data[i].list, &cpts->pool);
 
-       cpts_clk_init(cpts);
+       cpts_clk_init(dev, cpts);
        cpts_write32(cpts, CPTS_EN, control);
        cpts_write32(cpts, TS_PEND_EN, int_enable);
 
index 88ef27067bf24a8b2569f533b63ac223d52280fe..539dbdecd310e870ea98ae05bcd989b4aebc5dfb 100644 (file)
@@ -158,9 +158,9 @@ cpdma_desc_pool_create(struct device *dev, u32 phys, u32 hw_addr,
        int bitmap_size;
        struct cpdma_desc_pool *pool;
 
-       pool = kzalloc(sizeof(*pool), GFP_KERNEL);
+       pool = devm_kzalloc(dev, sizeof(*pool), GFP_KERNEL);
        if (!pool)
-               return NULL;
+               goto fail;
 
        spin_lock_init(&pool->lock);
 
@@ -170,7 +170,7 @@ cpdma_desc_pool_create(struct device *dev, u32 phys, u32 hw_addr,
        pool->num_desc  = size / pool->desc_size;
 
        bitmap_size  = (pool->num_desc / BITS_PER_LONG) * sizeof(long);
-       pool->bitmap = kzalloc(bitmap_size, GFP_KERNEL);
+       pool->bitmap = devm_kzalloc(dev, bitmap_size, GFP_KERNEL);
        if (!pool->bitmap)
                goto fail;
 
@@ -187,10 +187,7 @@ cpdma_desc_pool_create(struct device *dev, u32 phys, u32 hw_addr,
 
        if (pool->iomap)
                return pool;
-
 fail:
-       kfree(pool->bitmap);
-       kfree(pool);
        return NULL;
 }
 
@@ -203,7 +200,6 @@ static void cpdma_desc_pool_destroy(struct cpdma_desc_pool *pool)
 
        spin_lock_irqsave(&pool->lock, flags);
        WARN_ON(pool->used_desc);
-       kfree(pool->bitmap);
        if (pool->cpumap) {
                dma_free_coherent(pool->dev, pool->mem_size, pool->cpumap,
                                  pool->phys);
@@ -211,7 +207,6 @@ static void cpdma_desc_pool_destroy(struct cpdma_desc_pool *pool)
                iounmap(pool->iomap);
        }
        spin_unlock_irqrestore(&pool->lock, flags);
-       kfree(pool);
 }
 
 static inline dma_addr_t desc_phys(struct cpdma_desc_pool *pool,
@@ -276,7 +271,7 @@ struct cpdma_ctlr *cpdma_ctlr_create(struct cpdma_params *params)
 {
        struct cpdma_ctlr *ctlr;
 
-       ctlr = kzalloc(sizeof(*ctlr), GFP_KERNEL);
+       ctlr = devm_kzalloc(params->dev, sizeof(*ctlr), GFP_KERNEL);
        if (!ctlr)
                return NULL;
 
@@ -468,7 +463,6 @@ int cpdma_ctlr_destroy(struct cpdma_ctlr *ctlr)
 
        cpdma_desc_pool_destroy(ctlr->pool);
        spin_unlock_irqrestore(&ctlr->lock, flags);
-       kfree(ctlr);
        return ret;
 }
 EXPORT_SYMBOL_GPL(cpdma_ctlr_destroy);
@@ -507,21 +501,22 @@ struct cpdma_chan *cpdma_chan_create(struct cpdma_ctlr *ctlr, int chan_num,
                                     cpdma_handler_fn handler)
 {
        struct cpdma_chan *chan;
-       int ret, offset = (chan_num % CPDMA_MAX_CHANNELS) * 4;
+       int offset = (chan_num % CPDMA_MAX_CHANNELS) * 4;
        unsigned long flags;
 
        if (__chan_linear(chan_num) >= ctlr->num_chan)
                return NULL;
 
-       ret = -ENOMEM;
-       chan = kzalloc(sizeof(*chan), GFP_KERNEL);
+       chan = devm_kzalloc(ctlr->dev, sizeof(*chan), GFP_KERNEL);
        if (!chan)
-               goto err_chan_alloc;
+               return ERR_PTR(-ENOMEM);
 
        spin_lock_irqsave(&ctlr->lock, flags);
-       ret = -EBUSY;
-       if (ctlr->channels[chan_num])
-               goto err_chan_busy;
+       if (ctlr->channels[chan_num]) {
+               spin_unlock_irqrestore(&ctlr->lock, flags);
+               devm_kfree(ctlr->dev, chan);
+               return ERR_PTR(-EBUSY);
+       }
 
        chan->ctlr      = ctlr;
        chan->state     = CPDMA_STATE_IDLE;
@@ -551,12 +546,6 @@ struct cpdma_chan *cpdma_chan_create(struct cpdma_ctlr *ctlr, int chan_num,
        ctlr->channels[chan_num] = chan;
        spin_unlock_irqrestore(&ctlr->lock, flags);
        return chan;
-
-err_chan_busy:
-       spin_unlock_irqrestore(&ctlr->lock, flags);
-       kfree(chan);
-err_chan_alloc:
-       return ERR_PTR(ret);
 }
 EXPORT_SYMBOL_GPL(cpdma_chan_create);
 
index 8f0e69ce07ca3e03cfbf4cf1b22c92c9af27beeb..f32d730f55cc9f5c512d0515fb0e72f4b9eb89a4 100644 (file)
@@ -1865,7 +1865,6 @@ static int davinci_emac_probe(struct platform_device *pdev)
        struct emac_priv *priv;
        unsigned long hw_ram_addr;
        struct emac_platform_data *pdata;
-       struct device *emac_dev;
        struct cpdma_params dma_params;
        struct clk *emac_clk;
        unsigned long emac_bus_frequency;
@@ -1911,7 +1910,6 @@ static int davinci_emac_probe(struct platform_device *pdev)
        priv->coal_intvl = 0;
        priv->bus_freq_mhz = (u32)(emac_bus_frequency / 1000000);
 
-       emac_dev = &ndev->dev;
        /* Get EMAC platform data */
        res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
        priv->emac_base_phys = res->start + pdata->ctrl_reg_offset;
@@ -1930,7 +1928,7 @@ static int davinci_emac_probe(struct platform_device *pdev)
                hw_ram_addr = (u32 __force)res->start + pdata->ctrl_ram_offset;
 
        memset(&dma_params, 0, sizeof(dma_params));
-       dma_params.dev                  = emac_dev;
+       dma_params.dev                  = &pdev->dev;
        dma_params.dmaregs              = priv->emac_base;
        dma_params.rxthresh             = priv->emac_base + 0x120;
        dma_params.rxfree               = priv->emac_base + 0x140;
@@ -1980,7 +1978,7 @@ static int davinci_emac_probe(struct platform_device *pdev)
        }
 
        ndev->netdev_ops = &emac_netdev_ops;
-       SET_ETHTOOL_OPS(ndev, &ethtool_ops);
+       ndev->ethtool_ops = &ethtool_ops;
        netif_napi_add(ndev, &priv->napi, emac_poll, EMAC_POLL_WEIGHT);
 
        /* register the network device */
@@ -1994,7 +1992,7 @@ static int davinci_emac_probe(struct platform_device *pdev)
 
 
        if (netif_msg_probe(priv)) {
-               dev_notice(emac_dev, "DaVinci EMAC Probe found device "\
+               dev_notice(&pdev->dev, "DaVinci EMAC Probe found device "
                           "(regs: %p, irq: %d)\n",
                           (void *)priv->emac_base_phys, ndev->irq);
        }
index 0cca9dec5d8277542a4439aed4bb8953f3943b29..735dc53d4b0163be05eec83c83a4fb8bb4612497 100644 (file)
@@ -303,7 +303,7 @@ static int davinci_mdio_probe_dt(struct mdio_platform_data *data,
                return -EINVAL;
 
        if (of_property_read_u32(node, "bus_freq", &prop)) {
-               pr_err("Missing bus_freq property in the DT.\n");
+               dev_err(&pdev->dev, "Missing bus_freq property in the DT.\n");
                return -EINVAL;
        }
        data->bus_freq = prop;
@@ -321,15 +321,14 @@ static int davinci_mdio_probe(struct platform_device *pdev)
        struct phy_device *phy;
        int ret, addr;
 
-       data = kzalloc(sizeof(*data), GFP_KERNEL);
+       data = devm_kzalloc(dev, sizeof(*data), GFP_KERNEL);
        if (!data)
                return -ENOMEM;
 
-       data->bus = mdiobus_alloc();
+       data->bus = devm_mdiobus_alloc(dev);
        if (!data->bus) {
                dev_err(dev, "failed to alloc mii bus\n");
-               ret = -ENOMEM;
-               goto bail_out;
+               return -ENOMEM;
        }
 
        if (dev->of_node) {
@@ -349,12 +348,9 @@ static int davinci_mdio_probe(struct platform_device *pdev)
        data->bus->parent       = dev;
        data->bus->priv         = data;
 
-       /* Select default pin state */
-       pinctrl_pm_select_default_state(&pdev->dev);
-
        pm_runtime_enable(&pdev->dev);
        pm_runtime_get_sync(&pdev->dev);
-       data->clk = clk_get(&pdev->dev, "fck");
+       data->clk = devm_clk_get(dev, "fck");
        if (IS_ERR(data->clk)) {
                dev_err(dev, "failed to get device clock\n");
                ret = PTR_ERR(data->clk);
@@ -367,24 +363,9 @@ static int davinci_mdio_probe(struct platform_device *pdev)
        spin_lock_init(&data->lock);
 
        res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
-       if (!res) {
-               dev_err(dev, "could not find register map resource\n");
-               ret = -ENOENT;
-               goto bail_out;
-       }
-
-       res = devm_request_mem_region(dev, res->start, resource_size(res),
-                                           dev_name(dev));
-       if (!res) {
-               dev_err(dev, "could not allocate register map resource\n");
-               ret = -ENXIO;
-               goto bail_out;
-       }
-
-       data->regs = devm_ioremap_nocache(dev, res->start, resource_size(res));
-       if (!data->regs) {
-               dev_err(dev, "could not map mdio registers\n");
-               ret = -ENOMEM;
+       data->regs = devm_ioremap_resource(dev, res);
+       if (IS_ERR(data->regs)) {
+               ret = PTR_ERR(data->regs);
                goto bail_out;
        }
 
@@ -406,16 +387,9 @@ static int davinci_mdio_probe(struct platform_device *pdev)
        return 0;
 
 bail_out:
-       if (data->bus)
-               mdiobus_free(data->bus);
-
-       if (data->clk)
-               clk_put(data->clk);
        pm_runtime_put_sync(&pdev->dev);
        pm_runtime_disable(&pdev->dev);
 
-       kfree(data);
-
        return ret;
 }
 
@@ -423,18 +397,12 @@ static int davinci_mdio_remove(struct platform_device *pdev)
 {
        struct davinci_mdio_data *data = platform_get_drvdata(pdev);
 
-       if (data->bus) {
+       if (data->bus)
                mdiobus_unregister(data->bus);
-               mdiobus_free(data->bus);
-       }
 
-       if (data->clk)
-               clk_put(data->clk);
        pm_runtime_put_sync(&pdev->dev);
        pm_runtime_disable(&pdev->dev);
 
-       kfree(data);
-
        return 0;
 }
 
index 8a049a2b44742aabe24221b12a6bcb2a4caf357f..f66ddaee0c877f1620ad123fd9ef8402703aa1a6 100644 (file)
@@ -19,7 +19,7 @@ if NET_VENDOR_VIA
 
 config VIA_RHINE
        tristate "VIA Rhine support"
-       depends on PCI
+       depends on (PCI || USE_OF)
        select CRC32
        select MII
        ---help---
index f61dc2b72bb2f43780ace58a503bd2e3b89f61a9..981be0154be393931d958f8310bd2e0572184d04 100644 (file)
@@ -94,6 +94,10 @@ static const int multicast_filter_limit = 32;
 #include <linux/ioport.h>
 #include <linux/interrupt.h>
 #include <linux/pci.h>
+#include <linux/of_address.h>
+#include <linux/of_device.h>
+#include <linux/of_irq.h>
+#include <linux/platform_device.h>
 #include <linux/dma-mapping.h>
 #include <linux/netdevice.h>
 #include <linux/etherdevice.h>
@@ -116,13 +120,6 @@ static const int multicast_filter_limit = 32;
 static const char version[] =
        "v1.10-LK" DRV_VERSION " " DRV_RELDATE " Written by Donald Becker";
 
-/* This driver was written to use PCI memory space. Some early versions
-   of the Rhine may only work correctly with I/O space accesses. */
-#ifdef CONFIG_VIA_RHINE_MMIO
-#define USE_MMIO
-#else
-#endif
-
 MODULE_AUTHOR("Donald Becker <becker@scyld.com>");
 MODULE_DESCRIPTION("VIA Rhine PCI Fast Ethernet driver");
 MODULE_LICENSE("GPL");
@@ -260,6 +257,12 @@ enum rhine_quirks {
        rq6patterns     = 0x0040,       /* 6 instead of 4 patterns for WOL */
        rqStatusWBRace  = 0x0080,       /* Tx Status Writeback Error possible */
        rqRhineI        = 0x0100,       /* See comment below */
+       rqIntPHY        = 0x0200,       /* Integrated PHY */
+       rqMgmt          = 0x0400,       /* Management adapter */
+       rqNeedEnMMIO    = 0x0800,       /* Whether the core needs to be
+                                        * switched from PIO mode to MMIO
+                                        * (only applies to PCI)
+                                        */
 };
 /*
  * rqRhineI: VT86C100A (aka Rhine-I) uses different bits to enable
@@ -279,6 +282,15 @@ static DEFINE_PCI_DEVICE_TABLE(rhine_pci_tbl) = {
 };
 MODULE_DEVICE_TABLE(pci, rhine_pci_tbl);
 
+/* OpenFirmware identifiers for platform-bus devices
+ * The .data field is currently only used to store quirks
+ */
+static u32 vt8500_quirks = rqWOL | rqForceReset | rq6patterns;
+static struct of_device_id rhine_of_tbl[] = {
+       { .compatible = "via,vt8500-rhine", .data = &vt8500_quirks },
+       { }     /* terminate list */
+};
+MODULE_DEVICE_TABLE(of, rhine_of_tbl);
 
 /* Offsets to the device registers. */
 enum register_offsets {
@@ -338,13 +350,11 @@ enum bcr1_bits {
        BCR1_MED1=0x80,         /* for VT6102 */
 };
 
-#ifdef USE_MMIO
 /* Registers we check that mmio and reg are the same. */
 static const int mmio_verify_registers[] = {
        RxConfig, TxConfig, IntrEnable, ConfigA, ConfigB, ConfigC, ConfigD,
        0
 };
-#endif
 
 /* Bits in the interrupt status/mask registers. */
 enum intr_status_bits {
@@ -446,7 +456,7 @@ struct rhine_private {
        unsigned char *tx_bufs;
        dma_addr_t tx_bufs_dma;
 
-       struct pci_dev *pdev;
+       int irq;
        long pioaddr;
        struct net_device *dev;
        struct napi_struct napi;
@@ -649,20 +659,46 @@ static void rhine_chip_reset(struct net_device *dev)
                   "failed" : "succeeded");
 }
 
-#ifdef USE_MMIO
 static void enable_mmio(long pioaddr, u32 quirks)
 {
        int n;
-       if (quirks & rqRhineI) {
-               /* More recent docs say that this bit is reserved ... */
-               n = inb(pioaddr + ConfigA) | 0x20;
-               outb(n, pioaddr + ConfigA);
-       } else {
-               n = inb(pioaddr + ConfigD) | 0x80;
-               outb(n, pioaddr + ConfigD);
+
+       if (quirks & rqNeedEnMMIO) {
+               if (quirks & rqRhineI) {
+                       /* More recent docs say that this bit is reserved */
+                       n = inb(pioaddr + ConfigA) | 0x20;
+                       outb(n, pioaddr + ConfigA);
+               } else {
+                       n = inb(pioaddr + ConfigD) | 0x80;
+                       outb(n, pioaddr + ConfigD);
+               }
        }
 }
-#endif
+
+static inline int verify_mmio(struct device *hwdev,
+                             long pioaddr,
+                             void __iomem *ioaddr,
+                             u32 quirks)
+{
+       if (quirks & rqNeedEnMMIO) {
+               int i = 0;
+
+               /* Check that selected MMIO registers match the PIO ones */
+               while (mmio_verify_registers[i]) {
+                       int reg = mmio_verify_registers[i++];
+                       unsigned char a = inb(pioaddr+reg);
+                       unsigned char b = readb(ioaddr+reg);
+
+                       if (a != b) {
+                               dev_err(hwdev,
+                                       "MMIO do not match PIO [%02x] (%02x != %02x)\n",
+                                       reg, a, b);
+                               return -EIO;
+                       }
+               }
+       }
+       return 0;
+}
 
 /*
  * Loads bytes 0x00-0x05, 0x6E-0x6F, 0x78-0x7B from EEPROM
@@ -682,14 +718,12 @@ static void rhine_reload_eeprom(long pioaddr, struct net_device *dev)
        if (i > 512)
                pr_info("%4d cycles used @ %s:%d\n", i, __func__, __LINE__);
 
-#ifdef USE_MMIO
        /*
         * Reloading from EEPROM overwrites ConfigA-D, so we must re-enable
         * MMIO. If reloading EEPROM was done first this could be avoided, but
         * it is not known if that still works with the "win98-reboot" problem.
         */
        enable_mmio(pioaddr, rp->quirks);
-#endif
 
        /* Turn off EEPROM-controlled wake-up (magic packet) */
        if (rp->quirks & rqWOL)
@@ -701,7 +735,7 @@ static void rhine_reload_eeprom(long pioaddr, struct net_device *dev)
 static void rhine_poll(struct net_device *dev)
 {
        struct rhine_private *rp = netdev_priv(dev);
-       const int irq = rp->pdev->irq;
+       const int irq = rp->irq;
 
        disable_irq(irq);
        rhine_interrupt(irq, dev);
@@ -846,7 +880,8 @@ static void rhine_hw_init(struct net_device *dev, long pioaddr)
                msleep(5);
 
        /* Reload EEPROM controlled bytes cleared by soft reset */
-       rhine_reload_eeprom(pioaddr, dev);
+       if (dev_is_pci(dev->dev.parent))
+               rhine_reload_eeprom(pioaddr, dev);
 }
 
 static const struct net_device_ops rhine_netdev_ops = {
@@ -867,125 +902,37 @@ static const struct net_device_ops rhine_netdev_ops = {
 #endif
 };
 
-static int rhine_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
+static int rhine_init_one_common(struct device *hwdev, u32 quirks,
+                                long pioaddr, void __iomem *ioaddr, int irq)
 {
        struct net_device *dev;
        struct rhine_private *rp;
-       int i, rc;
-       u32 quirks;
-       long pioaddr;
-       long memaddr;
-       void __iomem *ioaddr;
-       int io_size, phy_id;
+       int i, rc, phy_id;
        const char *name;
-#ifdef USE_MMIO
-       int bar = 1;
-#else
-       int bar = 0;
-#endif
-
-/* when built into the kernel, we only print version if device is found */
-#ifndef MODULE
-       pr_info_once("%s\n", version);
-#endif
-
-       io_size = 256;
-       phy_id = 0;
-       quirks = 0;
-       name = "Rhine";
-       if (pdev->revision < VTunknown0) {
-               quirks = rqRhineI;
-               io_size = 128;
-       }
-       else if (pdev->revision >= VT6102) {
-               quirks = rqWOL | rqForceReset;
-               if (pdev->revision < VT6105) {
-                       name = "Rhine II";
-                       quirks |= rqStatusWBRace;       /* Rhine-II exclusive */
-               }
-               else {
-                       phy_id = 1;     /* Integrated PHY, phy_id fixed to 1 */
-                       if (pdev->revision >= VT6105_B0)
-                               quirks |= rq6patterns;
-                       if (pdev->revision < VT6105M)
-                               name = "Rhine III";
-                       else
-                               name = "Rhine III (Management Adapter)";
-               }
-       }
-
-       rc = pci_enable_device(pdev);
-       if (rc)
-               goto err_out;
 
        /* this should always be supported */
-       rc = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
+       rc = dma_set_mask(hwdev, DMA_BIT_MASK(32));
        if (rc) {
-               dev_err(&pdev->dev,
-                       "32-bit PCI DMA addresses not supported by the card!?\n");
-               goto err_out_pci_disable;
-       }
-
-       /* sanity check */
-       if ((pci_resource_len(pdev, 0) < io_size) ||
-           (pci_resource_len(pdev, 1) < io_size)) {
-               rc = -EIO;
-               dev_err(&pdev->dev, "Insufficient PCI resources, aborting\n");
-               goto err_out_pci_disable;
+               dev_err(hwdev, "32-bit DMA addresses not supported by the card!?\n");
+               goto err_out;
        }
 
-       pioaddr = pci_resource_start(pdev, 0);
-       memaddr = pci_resource_start(pdev, 1);
-
-       pci_set_master(pdev);
-
        dev = alloc_etherdev(sizeof(struct rhine_private));
        if (!dev) {
                rc = -ENOMEM;
-               goto err_out_pci_disable;
+               goto err_out;
        }
-       SET_NETDEV_DEV(dev, &pdev->dev);
+       SET_NETDEV_DEV(dev, hwdev);
 
        rp = netdev_priv(dev);
        rp->dev = dev;
        rp->quirks = quirks;
        rp->pioaddr = pioaddr;
-       rp->pdev = pdev;
+       rp->base = ioaddr;
+       rp->irq = irq;
        rp->msg_enable = netif_msg_init(debug, RHINE_MSG_DEFAULT);
 
-       rc = pci_request_regions(pdev, DRV_NAME);
-       if (rc)
-               goto err_out_free_netdev;
-
-       ioaddr = pci_iomap(pdev, bar, io_size);
-       if (!ioaddr) {
-               rc = -EIO;
-               dev_err(&pdev->dev,
-                       "ioremap failed for device %s, region 0x%X @ 0x%lX\n",
-                       pci_name(pdev), io_size, memaddr);
-               goto err_out_free_res;
-       }
-
-#ifdef USE_MMIO
-       enable_mmio(pioaddr, quirks);
-
-       /* Check that selected MMIO registers match the PIO ones */
-       i = 0;
-       while (mmio_verify_registers[i]) {
-               int reg = mmio_verify_registers[i++];
-               unsigned char a = inb(pioaddr+reg);
-               unsigned char b = readb(ioaddr+reg);
-               if (a != b) {
-                       rc = -EIO;
-                       dev_err(&pdev->dev,
-                               "MMIO do not match PIO [%02x] (%02x != %02x)\n",
-                               reg, a, b);
-                       goto err_out_unmap;
-               }
-       }
-#endif /* USE_MMIO */
-
-       rp->base = ioaddr;
+       phy_id = rp->quirks & rqIntPHY ? 1 : 0;
 
        u64_stats_init(&rp->tx_stats.syncp);
        u64_stats_init(&rp->rx_stats.syncp);
@@ -1030,7 +977,7 @@ static int rhine_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
        if (rp->quirks & rqRhineI)
                dev->features |= NETIF_F_SG|NETIF_F_HW_CSUM;
 
-       if (pdev->revision >= VT6105M)
+       if (rp->quirks & rqMgmt)
                dev->features |= NETIF_F_HW_VLAN_CTAG_TX |
                                 NETIF_F_HW_VLAN_CTAG_RX |
                                 NETIF_F_HW_VLAN_CTAG_FILTER;
@@ -1038,18 +985,21 @@ static int rhine_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
        /* dev->name not defined before register_netdev()! */
        rc = register_netdev(dev);
        if (rc)
-               goto err_out_unmap;
+               goto err_out_free_netdev;
+
+       if (rp->quirks & rqRhineI)
+               name = "Rhine";
+       else if (rp->quirks & rqStatusWBRace)
+               name = "Rhine II";
+       else if (rp->quirks & rqMgmt)
+               name = "Rhine III (Management Adapter)";
+       else
+               name = "Rhine III";
 
        netdev_info(dev, "VIA %s at 0x%lx, %pM, IRQ %d\n",
-                   name,
-#ifdef USE_MMIO
-                   memaddr,
-#else
-                   (long)ioaddr,
-#endif
-                   dev->dev_addr, pdev->irq);
+                   name, (long)ioaddr, dev->dev_addr, rp->irq);
 
-       pci_set_drvdata(pdev, dev);
+       dev_set_drvdata(hwdev, dev);
 
        {
                u16 mii_cmd;
@@ -1078,41 +1028,158 @@ static int rhine_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
 
        return 0;
 
+err_out_free_netdev:
+       free_netdev(dev);
+err_out:
+       return rc;
+}
+
+static int rhine_init_one_pci(struct pci_dev *pdev,
+                             const struct pci_device_id *ent)
+{
+       struct device *hwdev = &pdev->dev;
+       int rc;
+       long pioaddr, memaddr;
+       void __iomem *ioaddr;
+       int io_size = pdev->revision < VTunknown0 ? 128 : 256;
+
+/* This driver was written to use PCI memory space. Some early versions
+ * of the Rhine may only work correctly with I/O space accesses.
+ * TODO: determine for which revisions this is true and assign the flag
+ *      in code as opposed to this Kconfig option (???)
+ */
+#ifdef CONFIG_VIA_RHINE_MMIO
+       u32 quirks = rqNeedEnMMIO;
+#else
+       u32 quirks = 0;
+#endif
+
+/* when built into the kernel, we only print version if device is found */
+#ifndef MODULE
+       pr_info_once("%s\n", version);
+#endif
+
+       rc = pci_enable_device(pdev);
+       if (rc)
+               goto err_out;
+
+       if (pdev->revision < VTunknown0) {
+               quirks |= rqRhineI;
+       } else if (pdev->revision >= VT6102) {
+               quirks |= rqWOL | rqForceReset;
+               if (pdev->revision < VT6105) {
+                       quirks |= rqStatusWBRace;
+               } else {
+                       quirks |= rqIntPHY;
+                       if (pdev->revision >= VT6105_B0)
+                               quirks |= rq6patterns;
+                       if (pdev->revision >= VT6105M)
+                               quirks |= rqMgmt;
+               }
+       }
+
+       /* sanity check */
+       if ((pci_resource_len(pdev, 0) < io_size) ||
+           (pci_resource_len(pdev, 1) < io_size)) {
+               rc = -EIO;
+               dev_err(hwdev, "Insufficient PCI resources, aborting\n");
+               goto err_out_pci_disable;
+       }
+
+       pioaddr = pci_resource_start(pdev, 0);
+       memaddr = pci_resource_start(pdev, 1);
+
+       pci_set_master(pdev);
+
+       rc = pci_request_regions(pdev, DRV_NAME);
+       if (rc)
+               goto err_out_pci_disable;
+
+       ioaddr = pci_iomap(pdev, (quirks & rqNeedEnMMIO ? 1 : 0), io_size);
+       if (!ioaddr) {
+               rc = -EIO;
+               dev_err(hwdev,
+                       "ioremap failed for device %s, region 0x%X @ 0x%lX\n",
+                       dev_name(hwdev), io_size, memaddr);
+               goto err_out_free_res;
+       }
+
+       enable_mmio(pioaddr, quirks);
+
+       rc = verify_mmio(hwdev, pioaddr, ioaddr, quirks);
+       if (rc)
+               goto err_out_unmap;
+
+       rc = rhine_init_one_common(&pdev->dev, quirks,
+                                  pioaddr, ioaddr, pdev->irq);
+       if (!rc)
+               return 0;
+
 err_out_unmap:
        pci_iounmap(pdev, ioaddr);
 err_out_free_res:
        pci_release_regions(pdev);
-err_out_free_netdev:
-       free_netdev(dev);
 err_out_pci_disable:
        pci_disable_device(pdev);
 err_out:
        return rc;
 }
 
+static int rhine_init_one_platform(struct platform_device *pdev)
+{
+       const struct of_device_id *match;
+       const u32 *quirks;
+       int irq;
+       struct resource *res;
+       void __iomem *ioaddr;
+
+       match = of_match_device(rhine_of_tbl, &pdev->dev);
+       if (!match)
+               return -EINVAL;
+
+       res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+       ioaddr = devm_ioremap_resource(&pdev->dev, res);
+       if (IS_ERR(ioaddr))
+               return PTR_ERR(ioaddr);
+
+       irq = irq_of_parse_and_map(pdev->dev.of_node, 0);
+       if (!irq)
+               return -EINVAL;
+
+       quirks = match->data;
+       if (!quirks)
+               return -EINVAL;
+
+       return rhine_init_one_common(&pdev->dev, *quirks,
+                                    (long)ioaddr, ioaddr, irq);
+}
+
 static int alloc_ring(struct net_device* dev)
 {
        struct rhine_private *rp = netdev_priv(dev);
+       struct device *hwdev = dev->dev.parent;
        void *ring;
        dma_addr_t ring_dma;
 
-       ring = pci_alloc_consistent(rp->pdev,
-                                   RX_RING_SIZE * sizeof(struct rx_desc) +
-                                   TX_RING_SIZE * sizeof(struct tx_desc),
-                                   &ring_dma);
+       ring = dma_alloc_coherent(hwdev,
+                                 RX_RING_SIZE * sizeof(struct rx_desc) +
+                                 TX_RING_SIZE * sizeof(struct tx_desc),
+                                 &ring_dma,
+                                 GFP_ATOMIC);
        if (!ring) {
                netdev_err(dev, "Could not allocate DMA memory\n");
                return -ENOMEM;
        }
        if (rp->quirks & rqRhineI) {
-               rp->tx_bufs = pci_alloc_consistent(rp->pdev,
-                                                  PKT_BUF_SZ * TX_RING_SIZE,
-                                                  &rp->tx_bufs_dma);
+               rp->tx_bufs = dma_alloc_coherent(hwdev,
+                                                PKT_BUF_SZ * TX_RING_SIZE,
+                                                &rp->tx_bufs_dma,
+                                                GFP_ATOMIC);
                if (rp->tx_bufs == NULL) {
-                       pci_free_consistent(rp->pdev,
-                                   RX_RING_SIZE * sizeof(struct rx_desc) +
-                                   TX_RING_SIZE * sizeof(struct tx_desc),
-                                   ring, ring_dma);
+                       dma_free_coherent(hwdev,
+                                         RX_RING_SIZE * sizeof(struct rx_desc) +
+                                         TX_RING_SIZE * sizeof(struct tx_desc),
+                                         ring, ring_dma);
                        return -ENOMEM;
                }
        }
@@ -1128,16 +1195,17 @@ static int alloc_ring(struct net_device* dev)
 static void free_ring(struct net_device* dev)
 {
        struct rhine_private *rp = netdev_priv(dev);
+       struct device *hwdev = dev->dev.parent;
 
-       pci_free_consistent(rp->pdev,
-                           RX_RING_SIZE * sizeof(struct rx_desc) +
-                           TX_RING_SIZE * sizeof(struct tx_desc),
-                           rp->rx_ring, rp->rx_ring_dma);
+       dma_free_coherent(hwdev,
+                         RX_RING_SIZE * sizeof(struct rx_desc) +
+                         TX_RING_SIZE * sizeof(struct tx_desc),
+                         rp->rx_ring, rp->rx_ring_dma);
        rp->tx_ring = NULL;
 
        if (rp->tx_bufs)
-               pci_free_consistent(rp->pdev, PKT_BUF_SZ * TX_RING_SIZE,
-                                   rp->tx_bufs, rp->tx_bufs_dma);
+               dma_free_coherent(hwdev, PKT_BUF_SZ * TX_RING_SIZE,
+                                 rp->tx_bufs, rp->tx_bufs_dma);
 
        rp->tx_bufs = NULL;
 
@@ -1146,6 +1214,7 @@ static void free_ring(struct net_device* dev)
 static void alloc_rbufs(struct net_device *dev)
 {
        struct rhine_private *rp = netdev_priv(dev);
+       struct device *hwdev = dev->dev.parent;
        dma_addr_t next;
        int i;
 
@@ -1174,9 +1243,9 @@ static void alloc_rbufs(struct net_device *dev)
                        break;
 
                rp->rx_skbuff_dma[i] =
-                       pci_map_single(rp->pdev, skb->data, rp->rx_buf_sz,
-                                      PCI_DMA_FROMDEVICE);
-               if (dma_mapping_error(&rp->pdev->dev, rp->rx_skbuff_dma[i])) {
+                       dma_map_single(hwdev, skb->data, rp->rx_buf_sz,
+                                      DMA_FROM_DEVICE);
+               if (dma_mapping_error(hwdev, rp->rx_skbuff_dma[i])) {
                        rp->rx_skbuff_dma[i] = 0;
                        dev_kfree_skb(skb);
                        break;
@@ -1190,6 +1259,7 @@ static void alloc_rbufs(struct net_device *dev)
 static void free_rbufs(struct net_device* dev)
 {
        struct rhine_private *rp = netdev_priv(dev);
+       struct device *hwdev = dev->dev.parent;
        int i;
 
        /* Free all the skbuffs in the Rx queue. */
@@ -1197,9 +1267,9 @@ static void free_rbufs(struct net_device* dev)
                rp->rx_ring[i].rx_status = 0;
                rp->rx_ring[i].addr = cpu_to_le32(0xBADF00D0); /* An invalid address. */
                if (rp->rx_skbuff[i]) {
-                       pci_unmap_single(rp->pdev,
+                       dma_unmap_single(hwdev,
                                         rp->rx_skbuff_dma[i],
-                                        rp->rx_buf_sz, PCI_DMA_FROMDEVICE);
+                                        rp->rx_buf_sz, DMA_FROM_DEVICE);
                        dev_kfree_skb(rp->rx_skbuff[i]);
                }
                rp->rx_skbuff[i] = NULL;
@@ -1230,6 +1300,7 @@ static void alloc_tbufs(struct net_device* dev)
 static void free_tbufs(struct net_device* dev)
 {
        struct rhine_private *rp = netdev_priv(dev);
+       struct device *hwdev = dev->dev.parent;
        int i;
 
        for (i = 0; i < TX_RING_SIZE; i++) {
@@ -1238,10 +1309,10 @@ static void free_tbufs(struct net_device* dev)
                rp->tx_ring[i].addr = cpu_to_le32(0xBADF00D0); /* An invalid address. */
                if (rp->tx_skbuff[i]) {
                        if (rp->tx_skbuff_dma[i]) {
-                               pci_unmap_single(rp->pdev,
+                               dma_unmap_single(hwdev,
                                                 rp->tx_skbuff_dma[i],
                                                 rp->tx_skbuff[i]->len,
-                                                PCI_DMA_TODEVICE);
+                                                DMA_TO_DEVICE);
                        }
                        dev_kfree_skb(rp->tx_skbuff[i]);
                }
@@ -1469,7 +1540,7 @@ static void init_registers(struct net_device *dev)
 
        rhine_set_rx_mode(dev);
 
-       if (rp->pdev->revision >= VT6105M)
+       if (rp->quirks & rqMgmt)
                rhine_init_cam_filter(dev);
 
        napi_enable(&rp->napi);
@@ -1581,16 +1652,15 @@ static int rhine_open(struct net_device *dev)
        void __iomem *ioaddr = rp->base;
        int rc;
 
-       rc = request_irq(rp->pdev->irq, rhine_interrupt, IRQF_SHARED, dev->name,
-                       dev);
+       rc = request_irq(rp->irq, rhine_interrupt, IRQF_SHARED, dev->name, dev);
        if (rc)
                return rc;
 
-       netif_dbg(rp, ifup, dev, "%s() irq %d\n", __func__, rp->pdev->irq);
+       netif_dbg(rp, ifup, dev, "%s() irq %d\n", __func__, rp->irq);
 
        rc = alloc_ring(dev);
        if (rc) {
-               free_irq(rp->pdev->irq, dev);
+               free_irq(rp->irq, dev);
                return rc;
        }
        alloc_rbufs(dev);
@@ -1659,6 +1729,7 @@ static netdev_tx_t rhine_start_tx(struct sk_buff *skb,
                                  struct net_device *dev)
 {
        struct rhine_private *rp = netdev_priv(dev);
+       struct device *hwdev = dev->dev.parent;
        void __iomem *ioaddr = rp->base;
        unsigned entry;
 
@@ -1695,9 +1766,9 @@ static netdev_tx_t rhine_start_tx(struct sk_buff *skb,
                                                       rp->tx_bufs));
        } else {
                rp->tx_skbuff_dma[entry] =
-                       pci_map_single(rp->pdev, skb->data, skb->len,
-                                      PCI_DMA_TODEVICE);
-               if (dma_mapping_error(&rp->pdev->dev, rp->tx_skbuff_dma[entry])) {
+                       dma_map_single(hwdev, skb->data, skb->len,
+                                      DMA_TO_DEVICE);
+               if (dma_mapping_error(hwdev, rp->tx_skbuff_dma[entry])) {
                        dev_kfree_skb_any(skb);
                        rp->tx_skbuff_dma[entry] = 0;
                        dev->stats.tx_dropped++;
@@ -1788,6 +1859,7 @@ static irqreturn_t rhine_interrupt(int irq, void *dev_instance)
 static void rhine_tx(struct net_device *dev)
 {
        struct rhine_private *rp = netdev_priv(dev);
+       struct device *hwdev = dev->dev.parent;
        int txstatus = 0, entry = rp->dirty_tx % TX_RING_SIZE;
 
        /* find and cleanup dirty tx descriptors */
@@ -1831,10 +1903,10 @@ static void rhine_tx(struct net_device *dev)
                }
                /* Free the original skb. */
                if (rp->tx_skbuff_dma[entry]) {
-                       pci_unmap_single(rp->pdev,
+                       dma_unmap_single(hwdev,
                                         rp->tx_skbuff_dma[entry],
                                         rp->tx_skbuff[entry]->len,
-                                        PCI_DMA_TODEVICE);
+                                        DMA_TO_DEVICE);
                }
                dev_consume_skb_any(rp->tx_skbuff[entry]);
                rp->tx_skbuff[entry] = NULL;
@@ -1863,6 +1935,7 @@ static inline u16 rhine_get_vlan_tci(struct sk_buff *skb, int data_size)
 static int rhine_rx(struct net_device *dev, int limit)
 {
        struct rhine_private *rp = netdev_priv(dev);
+       struct device *hwdev = dev->dev.parent;
        int count;
        int entry = rp->cur_rx % RX_RING_SIZE;
 
@@ -1924,19 +1997,19 @@ static int rhine_rx(struct net_device *dev, int limit)
                        if (pkt_len < rx_copybreak)
                                skb = netdev_alloc_skb_ip_align(dev, pkt_len);
                        if (skb) {
-                               pci_dma_sync_single_for_cpu(rp->pdev,
-                                                           rp->rx_skbuff_dma[entry],
-                                                           rp->rx_buf_sz,
-                                                           PCI_DMA_FROMDEVICE);
+                               dma_sync_single_for_cpu(hwdev,
+                                                       rp->rx_skbuff_dma[entry],
+                                                       rp->rx_buf_sz,
+                                                       DMA_FROM_DEVICE);
 
                                skb_copy_to_linear_data(skb,
                                                 rp->rx_skbuff[entry]->data,
                                                 pkt_len);
                                skb_put(skb, pkt_len);
-                               pci_dma_sync_single_for_device(rp->pdev,
-                                                              rp->rx_skbuff_dma[entry],
-                                                              rp->rx_buf_sz,
-                                                              PCI_DMA_FROMDEVICE);
+                               dma_sync_single_for_device(hwdev,
+                                                          rp->rx_skbuff_dma[entry],
+                                                          rp->rx_buf_sz,
+                                                          DMA_FROM_DEVICE);
                        } else {
                                skb = rp->rx_skbuff[entry];
                                if (skb == NULL) {
@@ -1945,10 +2018,10 @@ static int rhine_rx(struct net_device *dev, int limit)
                                }
                                rp->rx_skbuff[entry] = NULL;
                                skb_put(skb, pkt_len);
-                               pci_unmap_single(rp->pdev,
+                               dma_unmap_single(hwdev,
                                                 rp->rx_skbuff_dma[entry],
                                                 rp->rx_buf_sz,
-                                                PCI_DMA_FROMDEVICE);
+                                                DMA_FROM_DEVICE);
                        }
 
                        if (unlikely(desc_length & DescTag))
@@ -1979,10 +2052,11 @@ static int rhine_rx(struct net_device *dev, int limit)
                        if (skb == NULL)
                                break;  /* Better luck next round. */
                        rp->rx_skbuff_dma[entry] =
-                               pci_map_single(rp->pdev, skb->data,
+                               dma_map_single(hwdev, skb->data,
                                               rp->rx_buf_sz,
-                                              PCI_DMA_FROMDEVICE);
-                       if (dma_mapping_error(&rp->pdev->dev, rp->rx_skbuff_dma[entry])) {
+                                              DMA_FROM_DEVICE);
+                       if (dma_mapping_error(hwdev,
+                                             rp->rx_skbuff_dma[entry])) {
                                dev_kfree_skb(skb);
                                rp->rx_skbuff_dma[entry] = 0;
                                break;
@@ -2103,7 +2177,7 @@ static void rhine_set_rx_mode(struct net_device *dev)
                /* Too many to match, or accept all multicasts. */
                iowrite32(0xffffffff, ioaddr + MulticastFilter0);
                iowrite32(0xffffffff, ioaddr + MulticastFilter1);
-       } else if (rp->pdev->revision >= VT6105M) {
+       } else if (rp->quirks & rqMgmt) {
                int i = 0;
                u32 mCAMmask = 0;       /* 32 mCAMs (6105M and better) */
                netdev_for_each_mc_addr(ha, dev) {
@@ -2125,7 +2199,7 @@ static void rhine_set_rx_mode(struct net_device *dev)
                iowrite32(mc_filter[1], ioaddr + MulticastFilter1);
        }
        /* enable/disable VLAN receive filtering */
-       if (rp->pdev->revision >= VT6105M) {
+       if (rp->quirks & rqMgmt) {
                if (dev->flags & IFF_PROMISC)
                        BYTE_REG_BITS_OFF(BCR1_VIDFR, ioaddr + PCIBusConfig1);
                else
@@ -2136,11 +2210,11 @@ static void rhine_set_rx_mode(struct net_device *dev)
 
 static void netdev_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
 {
-       struct rhine_private *rp = netdev_priv(dev);
+       struct device *hwdev = dev->dev.parent;
 
        strlcpy(info->driver, DRV_NAME, sizeof(info->driver));
        strlcpy(info->version, DRV_VERSION, sizeof(info->version));
-       strlcpy(info->bus_info, pci_name(rp->pdev), sizeof(info->bus_info));
+       strlcpy(info->bus_info, dev_name(hwdev), sizeof(info->bus_info));
 }
 
 static int netdev_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
@@ -2277,7 +2351,7 @@ static int rhine_close(struct net_device *dev)
        /* Stop the chip's Tx and Rx processes. */
        iowrite16(CmdStop, ioaddr + ChipCmd);
 
-       free_irq(rp->pdev->irq, dev);
+       free_irq(rp->irq, dev);
        free_rbufs(dev);
        free_tbufs(dev);
        free_ring(dev);
@@ -2286,7 +2360,7 @@ static int rhine_close(struct net_device *dev)
 }
 
 
-static void rhine_remove_one(struct pci_dev *pdev)
+static void rhine_remove_one_pci(struct pci_dev *pdev)
 {
        struct net_device *dev = pci_get_drvdata(pdev);
        struct rhine_private *rp = netdev_priv(dev);
@@ -2300,7 +2374,21 @@ static void rhine_remove_one(struct pci_dev *pdev)
        pci_disable_device(pdev);
 }
 
-static void rhine_shutdown (struct pci_dev *pdev)
+static int rhine_remove_one_platform(struct platform_device *pdev)
+{
+       struct net_device *dev = platform_get_drvdata(pdev);
+       struct rhine_private *rp = netdev_priv(dev);
+
+       unregister_netdev(dev);
+
+       iounmap(rp->base);
+
+       free_netdev(dev);
+
+       return 0;
+}
+
+static void rhine_shutdown_pci(struct pci_dev *pdev)
 {
        struct net_device *dev = pci_get_drvdata(pdev);
        struct rhine_private *rp = netdev_priv(dev);
@@ -2354,8 +2442,7 @@ static void rhine_shutdown (struct pci_dev *pdev)
 #ifdef CONFIG_PM_SLEEP
 static int rhine_suspend(struct device *device)
 {
-       struct pci_dev *pdev = to_pci_dev(device);
-       struct net_device *dev = pci_get_drvdata(pdev);
+       struct net_device *dev = dev_get_drvdata(device);
        struct rhine_private *rp = netdev_priv(dev);
 
        if (!netif_running(dev))
@@ -2367,23 +2454,21 @@ static int rhine_suspend(struct device *device)
 
        netif_device_detach(dev);
 
-       rhine_shutdown(pdev);
+       if (dev_is_pci(device))
+               rhine_shutdown_pci(to_pci_dev(device));
 
        return 0;
 }
 
 static int rhine_resume(struct device *device)
 {
-       struct pci_dev *pdev = to_pci_dev(device);
-       struct net_device *dev = pci_get_drvdata(pdev);
+       struct net_device *dev = dev_get_drvdata(device);
        struct rhine_private *rp = netdev_priv(dev);
 
        if (!netif_running(dev))
                return 0;
 
-#ifdef USE_MMIO
        enable_mmio(rp->pioaddr, rp->quirks);
-#endif
        rhine_power_init(dev);
        free_tbufs(dev);
        free_rbufs(dev);
@@ -2408,15 +2493,26 @@ static SIMPLE_DEV_PM_OPS(rhine_pm_ops, rhine_suspend, rhine_resume);
 
 #endif /* !CONFIG_PM_SLEEP */
 
-static struct pci_driver rhine_driver = {
+static struct pci_driver rhine_driver_pci = {
        .name           = DRV_NAME,
        .id_table       = rhine_pci_tbl,
-       .probe          = rhine_init_one,
-       .remove         = rhine_remove_one,
-       .shutdown       = rhine_shutdown,
+       .probe          = rhine_init_one_pci,
+       .remove         = rhine_remove_one_pci,
+       .shutdown       = rhine_shutdown_pci,
        .driver.pm      = RHINE_PM_OPS,
 };
 
+static struct platform_driver rhine_driver_platform = {
+       .probe          = rhine_init_one_platform,
+       .remove         = rhine_remove_one_platform,
+       .driver = {
+               .name   = DRV_NAME,
+               .owner  = THIS_MODULE,
+               .of_match_table = rhine_of_tbl,
+               .pm             = RHINE_PM_OPS,
+       }
+};
+
 static struct dmi_system_id rhine_dmi_table[] __initdata = {
        {
                .ident = "EPIA-M",
@@ -2437,6 +2533,8 @@ static struct dmi_system_id rhine_dmi_table[] __initdata = {
 
 static int __init rhine_init(void)
 {
+       int ret_pci, ret_platform;
+
 /* when a module, this is printed whether or not devices are found in probe */
 #ifdef MODULE
        pr_info("%s\n", version);
@@ -2449,13 +2547,19 @@ static int __init rhine_init(void)
        else if (avoid_D3)
                pr_info("avoid_D3 set\n");
 
-       return pci_register_driver(&rhine_driver);
+       ret_pci = pci_register_driver(&rhine_driver_pci);
+       ret_platform = platform_driver_register(&rhine_driver_platform);
+       if ((ret_pci < 0) && (ret_platform < 0))
+               return ret_pci;
+
+       return 0;
 }
 
 
 static void __exit rhine_cleanup(void)
 {
-       pci_unregister_driver(&rhine_driver);
+       platform_driver_unregister(&rhine_driver_platform);
+       pci_unregister_driver(&rhine_driver_pci);
 }
 
 
index fa193c4688da78719257ac982af8be1f81b270c1..4ef818a7a6c623719f0507cfc64b56ef3de709d9 100644 (file)
@@ -75,7 +75,7 @@ int temac_indirect_busywait(struct temac_local *lp)
        long end = jiffies + 2;
 
        while (!(temac_ior(lp, XTE_RDY0_OFFSET) & XTE_RDY0_HARD_ACS_RDY_MASK)) {
-               if (end - jiffies <= 0) {
+               if (time_before_eq(end, jiffies)) {
                        WARN_ON(1);
                        return -ETIMEDOUT;
                }
index 64b4639f43b6bea6b0e69155a7cb7043a14abcc4..d4abf478e2bbf6ae25f5925f406d27923b2b949c 100644 (file)
@@ -22,7 +22,7 @@ int axienet_mdio_wait_until_ready(struct axienet_local *lp)
        long end = jiffies + 2;
        while (!(axienet_ior(lp, XAE_MDIO_MCR_OFFSET) &
                 XAE_MDIO_MCR_READY_MASK)) {
-               if (end - jiffies <= 0) {
+               if (time_before_eq(end, jiffies)) {
                        WARN_ON(1);
                        return -ETIMEDOUT;
                }
index 0d87c67a5ff7208e807a980c406a934214c9d4a6..8c4aed3053ebc0a3a3757dcae408f25249f8e630 100644 (file)
@@ -702,7 +702,7 @@ static int xemaclite_mdio_wait(struct net_local *lp)
        */
        while (__raw_readl(lp->base_addr + XEL_MDIOCTRL_OFFSET) &
                        XEL_MDIOCTRL_MDIOSTS_MASK) {
-               if (end - jiffies <= 0) {
+               if (time_before_eq(end, jiffies)) {
                        WARN_ON(1);
                        return -ETIMEDOUT;
                }
index 57eb3f906d64be9508ae700804cff05d276e347a..4b7df5a5c966a619a988a37894606e4673c5a6bd 100644 (file)
@@ -119,27 +119,14 @@ struct ndis_recv_scale_param { /* NDIS_RECEIVE_SCALE_PARAMETERS */
 };
 
 /* Fwd declaration */
-struct hv_netvsc_packet;
 struct ndis_tcp_ip_checksum_info;
 
-/* Represent the xfer page packet which contains 1 or more netvsc packet */
-struct xferpage_packet {
-       struct list_head list_ent;
-       u32 status;
-
-       /* # of netvsc packets this xfer packet contains */
-       u32 count;
-
-       struct vmbus_channel *channel;
-};
-
 /*
  * Represent netvsc packet which contains 1 RNDIS and 1 ethernet frame
  * within the RNDIS
  */
 struct hv_netvsc_packet {
        /* Bookkeeping stuff */
-       struct list_head list_ent;
        u32 status;
 
        struct hv_device *device;
@@ -149,24 +136,11 @@ struct hv_netvsc_packet {
        u16 q_idx;
        struct vmbus_channel *channel;
 
-       /*
-        * Valid only for receives when we break a xfer page packet
-        * into multiple netvsc packets
-        */
-       struct xferpage_packet *xfer_page_pkt;
+       u64 send_completion_tid;
+       void *send_completion_ctx;
+       void (*send_completion)(void *context);
 
-       union {
-               struct {
-                       u64 recv_completion_tid;
-                       void *recv_completion_ctx;
-                       void (*recv_completion)(void *context);
-               } recv;
-               struct {
-                       u64 send_completion_tid;
-                       void *send_completion_ctx;
-                       void (*send_completion)(void *context);
-               } send;
-       } completion;
+       u32 send_buf_index;
 
        /* This points to the memory after page_buf */
        struct rndis_message *rndis_msg;
@@ -610,11 +584,11 @@ struct nvsp_message {
 
 #define NETVSC_RECEIVE_BUFFER_SIZE             (1024*1024*16)  /* 16MB */
 #define NETVSC_RECEIVE_BUFFER_SIZE_LEGACY      (1024*1024*15)  /* 15MB */
+#define NETVSC_SEND_BUFFER_SIZE                        (1024 * 1024)   /* 1MB */
+#define NETVSC_INVALID_INDEX                   -1
 
-#define NETVSC_RECEIVE_BUFFER_ID               0xcafe
 
-/* Preallocated receive packets */
-#define NETVSC_RECEIVE_PACKETLIST_COUNT                256
+#define NETVSC_RECEIVE_BUFFER_ID               0xcafe
 
 #define NETVSC_PACKET_SIZE                      2048
 
@@ -630,12 +604,6 @@ struct netvsc_device {
        wait_queue_head_t wait_drain;
        bool start_remove;
        bool destroy;
-       /*
-        * List of free preallocated hv_netvsc_packet to represent receive
-        * packet
-        */
-       struct list_head recv_pkt_list;
-       spinlock_t recv_pkt_list_lock;
 
        /* Receive buffer allocated by us but manages by NetVSP */
        void *recv_buf;
@@ -644,6 +612,15 @@ struct netvsc_device {
        u32 recv_section_cnt;
        struct nvsp_1_receive_buffer_section *recv_section;
 
+       /* Send buffer allocated by us */
+       void *send_buf;
+       u32 send_buf_size;
+       u32 send_buf_gpadl_handle;
+       u32 send_section_cnt;
+       u32 send_section_size;
+       unsigned long *send_section_map;
+       int map_words;
+
        /* Used for NetVSP initialization protocol */
        struct completion channel_init_wait;
        struct nvsp_message channel_init_pkt;
index e7e77f12bc38872001497378dc4cce7bb92cd0d1..c041f63a6d3053f51d5e3f6651bc1db0d0d6d25c 100644 (file)
@@ -28,6 +28,7 @@
 #include <linux/slab.h>
 #include <linux/netdevice.h>
 #include <linux/if_ether.h>
+#include <asm/sync_bitops.h>
 
 #include "hyperv_net.h"
 
@@ -80,7 +81,7 @@ get_in_err:
 }
 
 
-static int netvsc_destroy_recv_buf(struct netvsc_device *net_device)
+static int netvsc_destroy_buf(struct netvsc_device *net_device)
 {
        struct nvsp_message *revoke_packet;
        int ret = 0;
@@ -146,10 +147,62 @@ static int netvsc_destroy_recv_buf(struct netvsc_device *net_device)
                net_device->recv_section = NULL;
        }
 
+       /* Deal with the send buffer we may have setup.
+        * If we got a send section size, it means we received a
+        * SendSendBufferComplete msg (ie sent
+        * NvspMessage1TypeSendSendBuffer msg) therefore, we need
+        * to send a revoke msg here
+        */
+       if (net_device->send_section_size) {
+               /* Send the revoke send buffer */
+               revoke_packet = &net_device->revoke_packet;
+               memset(revoke_packet, 0, sizeof(struct nvsp_message));
+
+               revoke_packet->hdr.msg_type =
+                       NVSP_MSG1_TYPE_REVOKE_SEND_BUF;
+               revoke_packet->msg.v1_msg.revoke_recv_buf.id = 0;
+
+               ret = vmbus_sendpacket(net_device->dev->channel,
+                                      revoke_packet,
+                                      sizeof(struct nvsp_message),
+                                      (unsigned long)revoke_packet,
+                                      VM_PKT_DATA_INBAND, 0);
+               /* If we failed here, we might as well return and
+                * have a leak rather than continue and a bugchk
+                */
+               if (ret != 0) {
+                       netdev_err(ndev, "unable to send "
+                                  "revoke send buffer to netvsp\n");
+                       return ret;
+               }
+       }
+       /* Teardown the gpadl on the vsp end */
+       if (net_device->send_buf_gpadl_handle) {
+               ret = vmbus_teardown_gpadl(net_device->dev->channel,
+                                          net_device->send_buf_gpadl_handle);
+
+               /* If we failed here, we might as well return and have a leak
+                * rather than continue and a bugchk
+                */
+               if (ret != 0) {
+                       netdev_err(ndev,
+                                  "unable to teardown send buffer's gpadl\n");
+                       return ret;
+               }
+               net_device->send_buf_gpadl_handle = 0;
+       }
+       if (net_device->send_buf) {
+               /* Free up the send buffer */
+               free_pages((unsigned long)net_device->send_buf,
+                          get_order(net_device->send_buf_size));
+               net_device->send_buf = NULL;
+       }
+       kfree(net_device->send_section_map);
+
        return ret;
 }
 
-static int netvsc_init_recv_buf(struct hv_device *device)
+static int netvsc_init_buf(struct hv_device *device)
 {
        int ret = 0;
        int t;
@@ -248,10 +301,90 @@ static int netvsc_init_recv_buf(struct hv_device *device)
                goto cleanup;
        }
 
+       /* Now setup the send buffer.
+        */
+       net_device->send_buf =
+               (void *)__get_free_pages(GFP_KERNEL|__GFP_ZERO,
+                                        get_order(net_device->send_buf_size));
+       if (!net_device->send_buf) {
+               netdev_err(ndev, "unable to allocate send "
+                          "buffer of size %d\n", net_device->send_buf_size);
+               ret = -ENOMEM;
+               goto cleanup;
+       }
+
+       /* Establish the gpadl handle for this buffer on this
+        * channel.  Note: This call uses the vmbus connection rather
+        * than the channel to establish the gpadl handle.
+        */
+       ret = vmbus_establish_gpadl(device->channel, net_device->send_buf,
+                                   net_device->send_buf_size,
+                                   &net_device->send_buf_gpadl_handle);
+       if (ret != 0) {
+               netdev_err(ndev,
+                          "unable to establish send buffer's gpadl\n");
+               goto cleanup;
+       }
+
+       /* Notify the NetVsp of the gpadl handle */
+       init_packet = &net_device->channel_init_pkt;
+       memset(init_packet, 0, sizeof(struct nvsp_message));
+       init_packet->hdr.msg_type = NVSP_MSG1_TYPE_SEND_SEND_BUF;
+       init_packet->msg.v1_msg.send_recv_buf.gpadl_handle =
+               net_device->send_buf_gpadl_handle;
+       init_packet->msg.v1_msg.send_recv_buf.id = 0;
+
+       /* Send the gpadl notification request */
+       ret = vmbus_sendpacket(device->channel, init_packet,
+                              sizeof(struct nvsp_message),
+                              (unsigned long)init_packet,
+                              VM_PKT_DATA_INBAND,
+                              VMBUS_DATA_PACKET_FLAG_COMPLETION_REQUESTED);
+       if (ret != 0) {
+               netdev_err(ndev,
+                          "unable to send send buffer's gpadl to netvsp\n");
+               goto cleanup;
+       }
+
+       t = wait_for_completion_timeout(&net_device->channel_init_wait, 5*HZ);
+       BUG_ON(t == 0);
+
+       /* Check the response */
+       if (init_packet->msg.v1_msg.
+           send_send_buf_complete.status != NVSP_STAT_SUCCESS) {
+               netdev_err(ndev, "Unable to complete send buffer "
+                          "initialization with NetVsp - status %d\n",
+                          init_packet->msg.v1_msg.
+                          send_recv_buf_complete.status);
+               ret = -EINVAL;
+               goto cleanup;
+       }
+
+       /* Parse the response */
+       net_device->send_section_size = init_packet->msg.
+                               v1_msg.send_send_buf_complete.section_size;
+
+       /* Section count is simply the size divided by the section size.
+        */
+       net_device->send_section_cnt =
+               net_device->send_buf_size/net_device->send_section_size;
+
+       dev_info(&device->device, "Send section size: %d, Section count:%d\n",
+                net_device->send_section_size, net_device->send_section_cnt);
+
+       /* Setup state for managing the send buffer. */
+       net_device->map_words = DIV_ROUND_UP(net_device->send_section_cnt,
+                                            BITS_PER_LONG);
+
+       net_device->send_section_map =
+               kzalloc(net_device->map_words * sizeof(ulong), GFP_KERNEL);
+       if (net_device->send_section_map == NULL) {
+               ret = -ENOMEM;
+               goto cleanup;
+       }
+
        goto exit;
 
 cleanup:
-       netvsc_destroy_recv_buf(net_device);
+       netvsc_destroy_buf(net_device);
 
 exit:
        return ret;
@@ -369,8 +502,9 @@ static int netvsc_connect_vsp(struct hv_device *device)
                net_device->recv_buf_size = NETVSC_RECEIVE_BUFFER_SIZE_LEGACY;
        else
                net_device->recv_buf_size = NETVSC_RECEIVE_BUFFER_SIZE;
+       net_device->send_buf_size = NETVSC_SEND_BUFFER_SIZE;
 
-       ret = netvsc_init_recv_buf(device);
+       ret = netvsc_init_buf(device);
 
 cleanup:
        return ret;
@@ -378,7 +512,7 @@ cleanup:
 
 static void netvsc_disconnect_vsp(struct netvsc_device *net_device)
 {
-       netvsc_destroy_recv_buf(net_device);
+       netvsc_destroy_buf(net_device);
 }
 
 /*
@@ -387,7 +521,6 @@ static void netvsc_disconnect_vsp(struct netvsc_device *net_device)
 int netvsc_device_remove(struct hv_device *device)
 {
        struct netvsc_device *net_device;
-       struct hv_netvsc_packet *netvsc_packet, *pos;
        unsigned long flags;
 
        net_device = hv_get_drvdata(device);
@@ -416,12 +549,6 @@ int netvsc_device_remove(struct hv_device *device)
        vmbus_close(device->channel);
 
        /* Release all resources */
-       list_for_each_entry_safe(netvsc_packet, pos,
-                                &net_device->recv_pkt_list, list_ent) {
-               list_del(&netvsc_packet->list_ent);
-               kfree(netvsc_packet);
-       }
-
        if (net_device->sub_cb_buf)
                vfree(net_device->sub_cb_buf);
 
@@ -447,6 +574,12 @@ static inline u32 hv_ringbuf_avail_percent(
        return avail_write * 100 / ring_info->ring_datasize;
 }
 
+static inline void netvsc_free_send_slot(struct netvsc_device *net_device,
+                                        u32 index)
+{
+       sync_change_bit(index, net_device->send_section_map);
+}
+
 static void netvsc_send_completion(struct netvsc_device *net_device,
                                   struct hv_device *device,
                                   struct vmpacket_descriptor *packet)
@@ -454,6 +587,7 @@ static void netvsc_send_completion(struct netvsc_device *net_device,
        struct nvsp_message *nvsp_packet;
        struct hv_netvsc_packet *nvsc_packet;
        struct net_device *ndev;
+       u32 send_index;
 
        ndev = net_device->ndev;
 
@@ -484,11 +618,13 @@ static void netvsc_send_completion(struct netvsc_device *net_device,
 
                /* Notify the layer above us */
                if (nvsc_packet) {
+                       send_index = nvsc_packet->send_buf_index;
+                       if (send_index != NETVSC_INVALID_INDEX)
+                               netvsc_free_send_slot(net_device, send_index);
                        q_idx = nvsc_packet->q_idx;
                        channel = nvsc_packet->channel;
-                       nvsc_packet->completion.send.send_completion(
-                               nvsc_packet->completion.send.
-                               send_completion_ctx);
+                       nvsc_packet->send_completion(nvsc_packet->
+                                                    send_completion_ctx);
                }
 
                num_outstanding_sends =
@@ -512,6 +648,52 @@ static void netvsc_send_completion(struct netvsc_device *net_device,
 
 }
 
+static u32 netvsc_get_next_send_section(struct netvsc_device *net_device)
+{
+       unsigned long index;
+       u32 max_words = net_device->map_words;
+       unsigned long *map_addr = (unsigned long *)net_device->send_section_map;
+       u32 section_cnt = net_device->send_section_cnt;
+       int ret_val = NETVSC_INVALID_INDEX;
+       int i;
+       int prev_val;
+
+       for (i = 0; i < max_words; i++) {
+               if (!~(map_addr[i]))
+                       continue;
+               index = ffz(map_addr[i]);
+               prev_val = sync_test_and_set_bit(index, &map_addr[i]);
+               if (prev_val)
+                       continue;
+               if ((index + (i * BITS_PER_LONG)) >= section_cnt)
+                       break;
+               ret_val = (index + (i * BITS_PER_LONG));
+               break;
+       }
+       return ret_val;
+}
+
+u32 netvsc_copy_to_send_buf(struct netvsc_device *net_device,
+                           unsigned int section_index,
+                           struct hv_netvsc_packet *packet)
+{
+       char *start = net_device->send_buf;
+       char *dest = (start + (section_index * net_device->send_section_size));
+       int i;
+       u32 msg_size = 0;
+
+       for (i = 0; i < packet->page_buf_cnt; i++) {
+               char *src = phys_to_virt(packet->page_buf[i].pfn << PAGE_SHIFT);
+               u32 offset = packet->page_buf[i].offset;
+               u32 len = packet->page_buf[i].len;
+
+               memcpy(dest, (src + offset), len);
+               msg_size += len;
+               dest += len;
+       }
+       return msg_size;
+}
+
 int netvsc_send(struct hv_device *device,
                        struct hv_netvsc_packet *packet)
 {
@@ -521,6 +703,10 @@ int netvsc_send(struct hv_device *device,
        struct net_device *ndev;
        struct vmbus_channel *out_channel = NULL;
        u64 req_id;
+       unsigned int section_index = NETVSC_INVALID_INDEX;
+       u32 msg_size = 0;
+       struct sk_buff *skb;
+
 
        net_device = get_outbound_net_device(device);
        if (!net_device)
@@ -536,12 +722,28 @@ int netvsc_send(struct hv_device *device,
                sendMessage.msg.v1_msg.send_rndis_pkt.channel_type = 1;
        }
 
-       /* Not using send buffer section */
+       /* Attempt to send via sendbuf */
+       if (packet->total_data_buflen < net_device->send_section_size) {
+               section_index = netvsc_get_next_send_section(net_device);
+               if (section_index != NETVSC_INVALID_INDEX) {
+                       msg_size = netvsc_copy_to_send_buf(net_device,
+                                                          section_index,
+                                                          packet);
+                       skb = (struct sk_buff *)
+                             (unsigned long)packet->send_completion_tid;
+                       if (skb)
+                               dev_kfree_skb_any(skb);
+                       packet->page_buf_cnt = 0;
+               }
+       }
+       packet->send_buf_index = section_index;
+
+
        sendMessage.msg.v1_msg.send_rndis_pkt.send_buf_section_index =
-               0xFFFFFFFF;
-       sendMessage.msg.v1_msg.send_rndis_pkt.send_buf_section_size = 0;
+               section_index;
+       sendMessage.msg.v1_msg.send_rndis_pkt.send_buf_section_size = msg_size;
 
-       if (packet->completion.send.send_completion)
+       if (packet->send_completion)
                req_id = (ulong)packet;
        else
                req_id = 0;
@@ -641,62 +843,6 @@ retry_send_cmplt:
        }
 }
 
-/* Send a receive completion packet to RNDIS device (ie NetVsp) */
-static void netvsc_receive_completion(void *context)
-{
-       struct hv_netvsc_packet *packet = context;
-       struct hv_device *device = packet->device;
-       struct vmbus_channel *channel;
-       struct netvsc_device *net_device;
-       u64 transaction_id = 0;
-       bool fsend_receive_comp = false;
-       unsigned long flags;
-       struct net_device *ndev;
-       u32 status = NVSP_STAT_NONE;
-
-       /*
-        * Even though it seems logical to do a GetOutboundNetDevice() here to
-        * send out receive completion, we are using GetInboundNetDevice()
-        * since we may have disable outbound traffic already.
-        */
-       net_device = get_inbound_net_device(device);
-       if (!net_device)
-               return;
-       ndev = net_device->ndev;
-
-       /* Overloading use of the lock. */
-       spin_lock_irqsave(&net_device->recv_pkt_list_lock, flags);
-
-       if (packet->status != NVSP_STAT_SUCCESS)
-               packet->xfer_page_pkt->status = NVSP_STAT_FAIL;
-
-       packet->xfer_page_pkt->count--;
-
-       /*
-        * Last one in the line that represent 1 xfer page packet.
-        * Return the xfer page packet itself to the freelist
-        */
-       if (packet->xfer_page_pkt->count == 0) {
-               fsend_receive_comp = true;
-               channel = packet->xfer_page_pkt->channel;
-               transaction_id = packet->completion.recv.recv_completion_tid;
-               status = packet->xfer_page_pkt->status;
-               list_add_tail(&packet->xfer_page_pkt->list_ent,
-                             &net_device->recv_pkt_list);
-
-       }
-
-       /* Put the packet back */
-       list_add_tail(&packet->list_ent, &net_device->recv_pkt_list);
-       spin_unlock_irqrestore(&net_device->recv_pkt_list_lock, flags);
-
-       /* Send a receive completion for the xfer page packet */
-       if (fsend_receive_comp)
-               netvsc_send_recv_completion(device, channel, net_device,
-                                           transaction_id, status);
-
-}
-
 static void netvsc_receive(struct netvsc_device *net_device,
                        struct vmbus_channel *channel,
                        struct hv_device *device,
@@ -704,16 +850,13 @@ static void netvsc_receive(struct netvsc_device *net_device,
 {
        struct vmtransfer_page_packet_header *vmxferpage_packet;
        struct nvsp_message *nvsp_packet;
-       struct hv_netvsc_packet *netvsc_packet = NULL;
-       /* struct netvsc_driver *netvscDriver; */
-       struct xferpage_packet *xferpage_packet = NULL;
+       struct hv_netvsc_packet nv_pkt;
+       struct hv_netvsc_packet *netvsc_packet = &nv_pkt;
+       u32 status = NVSP_STAT_SUCCESS;
        int i;
        int count = 0;
-       unsigned long flags;
        struct net_device *ndev;
 
-       LIST_HEAD(listHead);
-
        ndev = net_device->ndev;
 
        /*
@@ -746,78 +889,14 @@ static void netvsc_receive(struct netvsc_device *net_device,
                return;
        }
 
-       /*
-        * Grab free packets (range count + 1) to represent this xfer
-        * page packet. +1 to represent the xfer page packet itself.
-        * We grab it here so that we know exactly how many we can
-        * fulfil
-        */
-       spin_lock_irqsave(&net_device->recv_pkt_list_lock, flags);
-       while (!list_empty(&net_device->recv_pkt_list)) {
-               list_move_tail(net_device->recv_pkt_list.next, &listHead);
-               if (++count == vmxferpage_packet->range_cnt + 1)
-                       break;
-       }
-       spin_unlock_irqrestore(&net_device->recv_pkt_list_lock, flags);
-
-       /*
-        * We need at least 2 netvsc pkts (1 to represent the xfer
-        * page and at least 1 for the range) i.e. we can handled
-        * some of the xfer page packet ranges...
-        */
-       if (count < 2) {
-               netdev_err(ndev, "Got only %d netvsc pkt...needed "
-                       "%d pkts. Dropping this xfer page packet completely!\n",
-                       count, vmxferpage_packet->range_cnt + 1);
-
-               /* Return it to the freelist */
-               spin_lock_irqsave(&net_device->recv_pkt_list_lock, flags);
-               for (i = count; i != 0; i--) {
-                       list_move_tail(listHead.next,
-                                      &net_device->recv_pkt_list);
-               }
-               spin_unlock_irqrestore(&net_device->recv_pkt_list_lock,
-                                      flags);
-
-               netvsc_send_recv_completion(device, channel, net_device,
-                                           vmxferpage_packet->d.trans_id,
-                                           NVSP_STAT_FAIL);
-
-               return;
-       }
-
-       /* Remove the 1st packet to represent the xfer page packet itself */
-       xferpage_packet = (struct xferpage_packet *)listHead.next;
-       list_del(&xferpage_packet->list_ent);
-       xferpage_packet->status = NVSP_STAT_SUCCESS;
-       xferpage_packet->channel = channel;
-
-       /* This is how much we can satisfy */
-       xferpage_packet->count = count - 1;
-
-       if (xferpage_packet->count != vmxferpage_packet->range_cnt) {
-               netdev_err(ndev, "Needed %d netvsc pkts to satisfy "
-                       "this xfer page...got %d\n",
-                       vmxferpage_packet->range_cnt, xferpage_packet->count);
-       }
+       count = vmxferpage_packet->range_cnt;
+       netvsc_packet->device = device;
+       netvsc_packet->channel = channel;
 
        /* Each range represents 1 RNDIS pkt that contains 1 ethernet frame */
-       for (i = 0; i < (count - 1); i++) {
-               netvsc_packet = (struct hv_netvsc_packet *)listHead.next;
-               list_del(&netvsc_packet->list_ent);
-
+       for (i = 0; i < count; i++) {
                /* Initialize the netvsc packet */
                netvsc_packet->status = NVSP_STAT_SUCCESS;
-               netvsc_packet->xfer_page_pkt = xferpage_packet;
-               netvsc_packet->completion.recv.recv_completion =
-                                       netvsc_receive_completion;
-               netvsc_packet->completion.recv.recv_completion_ctx =
-                                       netvsc_packet;
-               netvsc_packet->device = device;
-               /* Save this so that we can send it back */
-               netvsc_packet->completion.recv.recv_completion_tid =
-                                       vmxferpage_packet->d.trans_id;
-
                netvsc_packet->data = (void *)((unsigned long)net_device->
                        recv_buf + vmxferpage_packet->ranges[i].byte_offset);
                netvsc_packet->total_data_buflen =
@@ -826,10 +905,12 @@ static void netvsc_receive(struct netvsc_device *net_device,
                /* Pass it to the upper layer */
                rndis_filter_receive(device, netvsc_packet);
 
-               netvsc_receive_completion(netvsc_packet->
-                               completion.recv.recv_completion_ctx);
+               if (netvsc_packet->status != NVSP_STAT_SUCCESS)
+                       status = NVSP_STAT_FAIL;
        }
 
+       netvsc_send_recv_completion(device, channel, net_device,
+                                   vmxferpage_packet->d.trans_id, status);
 }
 
 
@@ -956,11 +1037,9 @@ void netvsc_channel_cb(void *context)
 int netvsc_device_add(struct hv_device *device, void *additional_info)
 {
        int ret = 0;
-       int i;
        int ring_size =
        ((struct netvsc_device_info *)additional_info)->ring_size;
        struct netvsc_device *net_device;
-       struct hv_netvsc_packet *packet, *pos;
        struct net_device *ndev;
 
        net_device = alloc_net_device(device);
@@ -981,18 +1060,6 @@ int netvsc_device_add(struct hv_device *device, void *additional_info)
        ndev = net_device->ndev;
 
        /* Initialize the NetVSC channel extension */
-       spin_lock_init(&net_device->recv_pkt_list_lock);
-
-       INIT_LIST_HEAD(&net_device->recv_pkt_list);
-
-       for (i = 0; i < NETVSC_RECEIVE_PACKETLIST_COUNT; i++) {
-               packet = kzalloc(sizeof(struct hv_netvsc_packet), GFP_KERNEL);
-               if (!packet)
-                       break;
-
-               list_add_tail(&packet->list_ent,
-                             &net_device->recv_pkt_list);
-       }
        init_completion(&net_device->channel_init_wait);
 
        set_per_channel_state(device->channel, net_device->cb_buffer);
@@ -1028,16 +1095,8 @@ close:
 
 cleanup:
 
-       if (net_device) {
-               list_for_each_entry_safe(packet, pos,
-                                        &net_device->recv_pkt_list,
-                                        list_ent) {
-                       list_del(&packet->list_ent);
-                       kfree(packet);
-               }
-
+       if (net_device)
                kfree(net_device);
-       }
 
        return ret;
 }
index 093cf3fc46b8683390892c48fe7dc5a2382b4f98..2e967a7bdb33ccf4a54eb6c8ab4e7f751e614c07 100644 (file)
@@ -235,11 +235,12 @@ static void netvsc_xmit_completion(void *context)
 {
        struct hv_netvsc_packet *packet = (struct hv_netvsc_packet *)context;
        struct sk_buff *skb = (struct sk_buff *)
-               (unsigned long)packet->completion.send.send_completion_tid;
+               (unsigned long)packet->send_completion_tid;
+       u32 index = packet->send_buf_index;
 
        kfree(packet);
 
-       if (skb)
+       if (skb && (index == NETVSC_INVALID_INDEX))
                dev_kfree_skb_any(skb);
 }
 
@@ -425,9 +426,9 @@ static int netvsc_start_xmit(struct sk_buff *skb, struct net_device *net)
                                (num_data_pgs * sizeof(struct hv_page_buffer)));
 
        /* Set the completion routine */
-       packet->completion.send.send_completion = netvsc_xmit_completion;
-       packet->completion.send.send_completion_ctx = packet;
-       packet->completion.send.send_completion_tid = (unsigned long)skb;
+       packet->send_completion = netvsc_xmit_completion;
+       packet->send_completion_ctx = packet;
+       packet->send_completion_tid = (unsigned long)skb;
 
        isvlan = packet->vlan_tci & VLAN_TAG_PRESENT;
 
@@ -466,6 +467,10 @@ static int netvsc_start_xmit(struct sk_buff *skb, struct net_device *net)
        if (skb_is_gso(skb))
                goto do_lso;
 
+       if ((skb->ip_summed == CHECKSUM_NONE) ||
+           (skb->ip_summed == CHECKSUM_UNNECESSARY))
+               goto do_send;
+
        rndis_msg_size += NDIS_CSUM_PPI_SIZE;
        ppi = init_ppi_data(rndis_msg, NDIS_CSUM_PPI_SIZE,
                            TCPIP_CHKSUM_PKTINFO);
@@ -638,9 +643,8 @@ int netvsc_recv_callback(struct hv_device *device_obj,
                __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q),
                                       packet->vlan_tci);
 
-       skb_record_rx_queue(skb, packet->xfer_page_pkt->channel->
-                           offermsg.offer.sub_channel_index %
-                           net->real_num_rx_queues);
+       skb_record_rx_queue(skb, packet->channel->
+                           offermsg.offer.sub_channel_index);
 
        net->stats.rx_packets++;
        net->stats.rx_bytes += packet->total_data_buflen;
@@ -806,7 +810,7 @@ static int netvsc_probe(struct hv_device *dev,
        net->features = NETIF_F_HW_VLAN_CTAG_TX | NETIF_F_SG | NETIF_F_RXCSUM |
                        NETIF_F_IP_CSUM | NETIF_F_TSO;
 
-       SET_ETHTOOL_OPS(net, &ethtool_ops);
+       net->ethtool_ops = &ethtool_ops;
        SET_NETDEV_DEV(net, &dev->device);
 
        /* Notify the netvsc driver of the new device */
@@ -823,8 +827,6 @@ static int netvsc_probe(struct hv_device *dev,
        nvdev = hv_get_drvdata(dev);
        netif_set_real_num_tx_queues(net, nvdev->num_chn);
        netif_set_real_num_rx_queues(net, nvdev->num_chn);
-       dev_info(&dev->device, "real num tx,rx queues:%u, %u\n",
-                net->real_num_tx_queues, net->real_num_rx_queues);
 
        ret = register_netdev(net);
        if (ret != 0) {
index d92cfbe4341036bbeffdd8db834d220d5b514b61..99c527adae5bf1ee154b2a02eb0f93a6df32e333 100644 (file)
@@ -236,7 +236,7 @@ static int rndis_filter_send_request(struct rndis_device *dev,
                        packet->page_buf[0].len;
        }
 
-       packet->completion.send.send_completion = NULL;
+       packet->send_completion = NULL;
 
        ret = netvsc_send(dev->net_dev->dev, packet);
        return ret;
@@ -401,8 +401,6 @@ static void rndis_filter_receive_data(struct rndis_device *dev,
        pkt->total_data_buflen = rndis_pkt->data_len;
        pkt->data = (void *)((unsigned long)pkt->data + data_offset);
 
-       pkt->is_data_pkt = true;
-
        vlan = rndis_get_ppi(rndis_pkt, IEEE_8021Q_INFO);
        if (vlan) {
                pkt->vlan_tci = VLAN_TAG_PRESENT | vlan->vlanid |
index e36f194673a45a2035a15830571e4e2c02039839..4517b149ed0786946e44eb1a699fe232c2fbd166 100644 (file)
@@ -23,6 +23,7 @@
 #include <linux/kernel.h>
 #include <linux/module.h>
 #include <linux/interrupt.h>
+#include <linux/irq.h>
 #include <linux/gpio.h>
 #include <linux/delay.h>
 #include <linux/mutex.h>
@@ -692,10 +693,7 @@ at86rf230_xmit(struct ieee802154_dev *dev, struct sk_buff *skb)
        if (rc < 0)
                goto err_rx;
 
-       rc = at86rf230_start(dev);
-
-       return rc;
-
+       return at86rf230_start(dev);
 err_rx:
        at86rf230_start(dev);
 err:
@@ -963,33 +961,24 @@ static irqreturn_t at86rf230_isr_level(int irq, void *data)
        return at86rf230_isr(irq, data);
 }
 
-static int at86rf230_irq_polarity(struct at86rf230_local *lp, int pol)
-{
-       return at86rf230_write_subreg(lp, SR_IRQ_POLARITY, pol);
-}
-
 static int at86rf230_hw_init(struct at86rf230_local *lp)
 {
-       struct at86rf230_platform_data *pdata = lp->spi->dev.platform_data;
-       int rc, irq_pol;
-       u8 status;
+       int rc, irq_pol, irq_type;
+       u8 dvdd;
        u8 csma_seed[2];
 
-       rc = at86rf230_read_subreg(lp, SR_TRX_STATUS, &status);
-       if (rc)
-               return rc;
-
        rc = at86rf230_write_subreg(lp, SR_TRX_CMD, STATE_FORCE_TRX_OFF);
        if (rc)
                return rc;
 
+       irq_type = irq_get_trigger_type(lp->spi->irq);
        /* configure irq polarity, defaults to high active */
-       if (pdata->irq_type & (IRQF_TRIGGER_FALLING | IRQF_TRIGGER_LOW))
+       if (irq_type & (IRQF_TRIGGER_FALLING | IRQF_TRIGGER_LOW))
                irq_pol = IRQ_ACTIVE_LOW;
        else
                irq_pol = IRQ_ACTIVE_HIGH;
 
-       rc = at86rf230_irq_polarity(lp, irq_pol);
+       rc = at86rf230_write_subreg(lp, SR_IRQ_POLARITY, irq_pol);
        if (rc)
                return rc;
 
@@ -1017,10 +1006,10 @@ static int at86rf230_hw_init(struct at86rf230_local *lp)
        /* Wait the next SLEEP cycle */
        msleep(100);
 
-       rc = at86rf230_read_subreg(lp, SR_DVDD_OK, &status);
+       rc = at86rf230_read_subreg(lp, SR_DVDD_OK, &dvdd);
        if (rc)
                return rc;
-       if (!status) {
+       if (!dvdd) {
                dev_err(&lp->spi->dev, "DVDD error\n");
                return -EINVAL;
        }
@@ -1032,7 +1021,6 @@ static struct at86rf230_platform_data *
 at86rf230_get_pdata(struct spi_device *spi)
 {
        struct at86rf230_platform_data *pdata;
-       const char *irq_type;
 
        if (!IS_ENABLED(CONFIG_OF) || !spi->dev.of_node)
                return spi->dev.platform_data;
@@ -1044,19 +1032,6 @@ at86rf230_get_pdata(struct spi_device *spi)
        pdata->rstn = of_get_named_gpio(spi->dev.of_node, "reset-gpio", 0);
        pdata->slp_tr = of_get_named_gpio(spi->dev.of_node, "sleep-gpio", 0);
 
-       pdata->irq_type = IRQF_TRIGGER_RISING;
-       of_property_read_string(spi->dev.of_node, "irq-type", &irq_type);
-       if (!strcmp(irq_type, "level-high"))
-               pdata->irq_type = IRQF_TRIGGER_HIGH;
-       else if (!strcmp(irq_type, "level-low"))
-               pdata->irq_type = IRQF_TRIGGER_LOW;
-       else if (!strcmp(irq_type, "edge-rising"))
-               pdata->irq_type = IRQF_TRIGGER_RISING;
-       else if (!strcmp(irq_type, "edge-falling"))
-               pdata->irq_type = IRQF_TRIGGER_FALLING;
-       else
-               dev_warn(&spi->dev, "wrong irq-type specified using edge-rising\n");
-
        spi->dev.platform_data = pdata;
 done:
        return pdata;
@@ -1071,7 +1046,7 @@ static int at86rf230_probe(struct spi_device *spi)
        u8 part = 0, version = 0, status;
        irq_handler_t irq_handler;
        work_func_t irq_worker;
-       int rc;
+       int rc, irq_type;
        const char *chip;
        struct ieee802154_ops *ops = NULL;
 
@@ -1087,27 +1062,17 @@ static int at86rf230_probe(struct spi_device *spi)
        }
 
        if (gpio_is_valid(pdata->rstn)) {
-               rc = gpio_request(pdata->rstn, "rstn");
+               rc = devm_gpio_request_one(&spi->dev, pdata->rstn,
+                                          GPIOF_OUT_INIT_HIGH, "rstn");
                if (rc)
                        return rc;
        }
 
        if (gpio_is_valid(pdata->slp_tr)) {
-               rc = gpio_request(pdata->slp_tr, "slp_tr");
-               if (rc)
-                       goto err_slp_tr;
-       }
-
-       if (gpio_is_valid(pdata->rstn)) {
-               rc = gpio_direction_output(pdata->rstn, 1);
-               if (rc)
-                       goto err_gpio_dir;
-       }
-
-       if (gpio_is_valid(pdata->slp_tr)) {
-               rc = gpio_direction_output(pdata->slp_tr, 0);
+               rc = devm_gpio_request_one(&spi->dev, pdata->slp_tr,
+                                          GPIOF_OUT_INIT_LOW, "slp_tr");
                if (rc)
-                       goto err_gpio_dir;
+                       return rc;
        }
 
        /* Reset */
@@ -1121,13 +1086,12 @@ static int at86rf230_probe(struct spi_device *spi)
 
        rc = __at86rf230_detect_device(spi, &man_id, &part, &version);
        if (rc < 0)
-               goto err_gpio_dir;
+               return rc;
 
        if (man_id != 0x001f) {
                dev_err(&spi->dev, "Non-Atmel dev found (MAN_ID %02x %02x)\n",
                        man_id >> 8, man_id & 0xFF);
-               rc = -EINVAL;
-               goto err_gpio_dir;
+               return -EINVAL;
        }
 
        switch (part) {
@@ -1154,16 +1118,12 @@ static int at86rf230_probe(struct spi_device *spi)
        }
 
        dev_info(&spi->dev, "Detected %s chip version %d\n", chip, version);
-       if (!ops) {
-               rc = -ENOTSUPP;
-               goto err_gpio_dir;
-       }
+       if (!ops)
+               return -ENOTSUPP;
 
        dev = ieee802154_alloc_device(sizeof(*lp), ops);
-       if (!dev) {
-               rc = -ENOMEM;
-               goto err_gpio_dir;
-       }
+       if (!dev)
+               return -ENOMEM;
 
        lp = dev->priv;
        lp->dev = dev;
@@ -1176,7 +1136,8 @@ static int at86rf230_probe(struct spi_device *spi)
        dev->extra_tx_headroom = 0;
        dev->flags = IEEE802154_HW_OMIT_CKSUM | IEEE802154_HW_AACK;
 
-       if (pdata->irq_type & (IRQF_TRIGGER_RISING | IRQF_TRIGGER_FALLING)) {
+       irq_type = irq_get_trigger_type(spi->irq);
+       if (irq_type & (IRQF_TRIGGER_RISING | IRQF_TRIGGER_FALLING)) {
                irq_worker = at86rf230_irqwork;
                irq_handler = at86rf230_isr;
        } else {
@@ -1202,75 +1163,65 @@ static int at86rf230_probe(struct spi_device *spi)
        if (rc)
                goto err_hw_init;
 
-       rc = request_irq(spi->irq, irq_handler,
-                        IRQF_SHARED | pdata->irq_type,
-                        dev_name(&spi->dev), lp);
+       /* Read irq status register to reset irq line */
+       rc = at86rf230_read_subreg(lp, RG_IRQ_STATUS, 0xff, 0, &status);
        if (rc)
                goto err_hw_init;
 
-       /* Read irq status register to reset irq line */
-       rc = at86rf230_read_subreg(lp, RG_IRQ_STATUS, 0xff, 0, &status);
+       rc = devm_request_irq(&spi->dev, spi->irq, irq_handler, IRQF_SHARED,
+                             dev_name(&spi->dev), lp);
        if (rc)
-               goto err_irq;
+               goto err_hw_init;
 
        rc = ieee802154_register_device(lp->dev);
        if (rc)
-               goto err_irq;
+               goto err_hw_init;
 
        return rc;
 
-err_irq:
-       free_irq(spi->irq, lp);
 err_hw_init:
        flush_work(&lp->irqwork);
-       spi_set_drvdata(spi, NULL);
        mutex_destroy(&lp->bmux);
        ieee802154_free_device(lp->dev);
 
-err_gpio_dir:
-       if (gpio_is_valid(pdata->slp_tr))
-               gpio_free(pdata->slp_tr);
-err_slp_tr:
-       if (gpio_is_valid(pdata->rstn))
-               gpio_free(pdata->rstn);
        return rc;
 }
 
 static int at86rf230_remove(struct spi_device *spi)
 {
        struct at86rf230_local *lp = spi_get_drvdata(spi);
-       struct at86rf230_platform_data *pdata = spi->dev.platform_data;
 
        /* mask all at86rf230 irq's */
        at86rf230_write_subreg(lp, SR_IRQ_MASK, 0);
        ieee802154_unregister_device(lp->dev);
-
-       free_irq(spi->irq, lp);
        flush_work(&lp->irqwork);
-
-       if (gpio_is_valid(pdata->slp_tr))
-               gpio_free(pdata->slp_tr);
-       if (gpio_is_valid(pdata->rstn))
-               gpio_free(pdata->rstn);
-
        mutex_destroy(&lp->bmux);
        ieee802154_free_device(lp->dev);
-
        dev_dbg(&spi->dev, "unregistered at86rf230\n");
+
        return 0;
 }
 
-#if IS_ENABLED(CONFIG_OF)
-static struct of_device_id at86rf230_of_match[] = {
+static const struct of_device_id at86rf230_of_match[] = {
        { .compatible = "atmel,at86rf230", },
        { .compatible = "atmel,at86rf231", },
        { .compatible = "atmel,at86rf233", },
        { .compatible = "atmel,at86rf212", },
        { },
 };
-#endif
+MODULE_DEVICE_TABLE(of, at86rf230_of_match);
+
+static const struct spi_device_id at86rf230_device_id[] = {
+       { .name = "at86rf230", },
+       { .name = "at86rf231", },
+       { .name = "at86rf233", },
+       { .name = "at86rf212", },
+       { },
+};
+MODULE_DEVICE_TABLE(spi, at86rf230_device_id);
 
 static struct spi_driver at86rf230_driver = {
+       .id_table = at86rf230_device_id,
        .driver = {
                .of_match_table = of_match_ptr(at86rf230_of_match),
                .name   = "at86rf230",
index b8d22173925dee1aed62df3ccd4d23deb44b976b..27d83207d24ce0de722144c74e98ce65601fcfc0 100644 (file)
@@ -26,6 +26,7 @@
 #include <linux/timer.h>
 #include <linux/platform_device.h>
 #include <linux/netdevice.h>
+#include <linux/device.h>
 #include <linux/spinlock.h>
 #include <net/mac802154.h>
 #include <net/wpan-phy.h>
@@ -228,7 +229,8 @@ static int fakelb_probe(struct platform_device *pdev)
        int err = -ENOMEM;
        int i;
 
-       priv = kzalloc(sizeof(struct fakelb_priv), GFP_KERNEL);
+       priv = devm_kzalloc(&pdev->dev, sizeof(struct fakelb_priv),
+                           GFP_KERNEL);
        if (!priv)
                goto err_alloc;
 
@@ -248,7 +250,6 @@ static int fakelb_probe(struct platform_device *pdev)
 err_slave:
        list_for_each_entry(dp, &priv->list, list)
                fakelb_del(dp);
-       kfree(priv);
 err_alloc:
        return err;
 }
@@ -260,7 +261,6 @@ static int fakelb_remove(struct platform_device *pdev)
 
        list_for_each_entry_safe(dp, temp, &priv->list, list)
                fakelb_del(dp);
-       kfree(priv);
 
        return 0;
 }
index 3da44d5d91497801a141b373c60f8cd5890a1bf9..8d101d63abca9a48466edfed2db11ab54df32d63 100644 (file)
@@ -396,7 +396,8 @@ config MCS_FIR
 
 config SH_IRDA
        tristate "SuperH IrDA driver"
-       depends on IRDA && ARCH_SHMOBILE
+       depends on IRDA
+       depends on ARCH_SHMOBILE || COMPILE_TEST
        help
          Say Y here if your want to enable SuperH IrDA devices.
 
index e641bb2403624fdd91fb565ea646d70d03c2c5fd..11dbdf36d9c1b328ff70b6e5d0e5d2f7729dbb2a 100644 (file)
 #include "w83977af.h"
 #include "w83977af_ir.h"
 
-#ifdef  CONFIG_ARCH_NETWINDER            /* Adjust to NetWinder differences */
-#undef  CONFIG_NETWINDER_TX_DMA_PROBLEMS /* Not needed */
-#define CONFIG_NETWINDER_RX_DMA_PROBLEMS /* Must have this one! */
-#endif
 #define CONFIG_USE_W977_PNP        /* Currently needed */
 #define PIO_MAX_SPEED       115200 
 
@@ -332,7 +328,7 @@ static int w83977af_probe(int iobase, int irq, int dma)
                w977_write_reg(0x74, dma+1, efbase[i]);
 #else
                w977_write_reg(0x74, dma, efbase[i]);   
-#endif /*CONFIG_ARCH_NETWINDER */
+#endif /* CONFIG_ARCH_NETWINDER */
                w977_write_reg(0x75, 0x04, efbase[i]);  /* Disable Tx DMA */
        
                /* Set append hardware CRC, enable IR bank selection */ 
@@ -563,10 +559,6 @@ static netdev_tx_t w83977af_hard_xmit(struct sk_buff *skb,
 static void w83977af_dma_write(struct w83977af_ir *self, int iobase)
 {
        __u8 set;
-#ifdef CONFIG_NETWINDER_TX_DMA_PROBLEMS
-       unsigned long flags;
-       __u8 hcr;
-#endif
         IRDA_DEBUG(4, "%s(), len=%d\n", __func__ , self->tx_buff.len);
 
        /* Save current set */
@@ -579,30 +571,13 @@ static void w83977af_dma_write(struct w83977af_ir *self, int iobase)
        /* Choose transmit DMA channel  */ 
        switch_bank(iobase, SET2);
        outb(ADCR1_D_CHSW|/*ADCR1_DMA_F|*/ADCR1_ADV_SL, iobase+ADCR1);
-#ifdef CONFIG_NETWINDER_TX_DMA_PROBLEMS
-       spin_lock_irqsave(&self->lock, flags);
-
-       disable_dma(self->io.dma);
-       clear_dma_ff(self->io.dma);
-       set_dma_mode(self->io.dma, DMA_MODE_READ);
-       set_dma_addr(self->io.dma, self->tx_buff_dma);
-       set_dma_count(self->io.dma, self->tx_buff.len);
-#else
        irda_setup_dma(self->io.dma, self->tx_buff_dma, self->tx_buff.len,
                       DMA_MODE_WRITE); 
-#endif
        self->io.direction = IO_XMIT;
        
        /* Enable DMA */
        switch_bank(iobase, SET0);
-#ifdef CONFIG_NETWINDER_TX_DMA_PROBLEMS
-       hcr = inb(iobase+HCR);
-       outb(hcr | HCR_EN_DMA, iobase+HCR);
-       enable_dma(self->io.dma);
-       spin_unlock_irqrestore(&self->lock, flags);
-#else  
        outb(inb(iobase+HCR) | HCR_EN_DMA | HCR_TX_WT, iobase+HCR);
-#endif
 
        /* Restore set register */
        outb(set, iobase+SSR);
@@ -711,7 +686,7 @@ static int w83977af_dma_receive(struct w83977af_ir *self)
 {
        int iobase;
        __u8 set;
-#ifdef CONFIG_NETWINDER_RX_DMA_PROBLEMS
+#ifdef CONFIG_ARCH_NETWINDER
        unsigned long flags;
        __u8 hcr;
 #endif
@@ -736,7 +711,7 @@ static int w83977af_dma_receive(struct w83977af_ir *self)
        self->io.direction = IO_RECV;
        self->rx_buff.data = self->rx_buff.head;
 
-#ifdef CONFIG_NETWINDER_RX_DMA_PROBLEMS
+#ifdef CONFIG_ARCH_NETWINDER
        spin_lock_irqsave(&self->lock, flags);
 
        disable_dma(self->io.dma);
@@ -759,7 +734,7 @@ static int w83977af_dma_receive(struct w83977af_ir *self)
        
        /* Enable DMA */
        switch_bank(iobase, SET0);
-#ifdef CONFIG_NETWINDER_RX_DMA_PROBLEMS
+#ifdef CONFIG_ARCH_NETWINDER
        hcr = inb(iobase+HCR);
        outb(hcr | HCR_EN_DMA, iobase+HCR);
        enable_dma(self->io.dma);
index 8b8220fcdd3d2e7448f1834d4462924d27b3d2c5..f4701da19a026bdfb2e037bf00196267b9e9fa10 100644 (file)
@@ -44,9 +44,10 @@ struct macvlan_port {
        struct sk_buff_head     bc_queue;
        struct work_struct      bc_work;
        bool                    passthru;
-       int                     count;
 };
 
+#define MACVLAN_PORT_IS_EMPTY(port)    list_empty(&port->vlans)
+
 struct macvlan_skb_cb {
        const struct macvlan_dev *src;
 };
@@ -239,25 +240,28 @@ static void macvlan_process_broadcast(struct work_struct *w)
 static void macvlan_broadcast_enqueue(struct macvlan_port *port,
                                      struct sk_buff *skb)
 {
+       struct sk_buff *nskb;
        int err = -ENOMEM;
 
-       skb = skb_clone(skb, GFP_ATOMIC);
-       if (!skb)
+       nskb = skb_clone(skb, GFP_ATOMIC);
+       if (!nskb)
                goto err;
 
        spin_lock(&port->bc_queue.lock);
        if (skb_queue_len(&port->bc_queue) < skb->dev->tx_queue_len) {
-               __skb_queue_tail(&port->bc_queue, skb);
+               __skb_queue_tail(&port->bc_queue, nskb);
                err = 0;
        }
        spin_unlock(&port->bc_queue.lock);
 
        if (err)
-               goto err;
+               goto free_nskb;
 
        schedule_work(&port->bc_work);
        return;
 
+free_nskb:
+       kfree_skb(nskb);
 err:
        atomic_long_inc(&skb->dev->rx_dropped);
 }
@@ -329,11 +333,9 @@ static int macvlan_queue_xmit(struct sk_buff *skb, struct net_device *dev)
        const struct macvlan_dev *vlan = netdev_priv(dev);
        const struct macvlan_port *port = vlan->port;
        const struct macvlan_dev *dest;
-       __u8 ip_summed = skb->ip_summed;
 
        if (vlan->mode == MACVLAN_MODE_BRIDGE) {
                const struct ethhdr *eth = (void *)skb->data;
-               skb->ip_summed = CHECKSUM_UNNECESSARY;
 
                /* send to other bridge ports directly */
                if (is_multicast_ether_addr(eth->h_dest)) {
@@ -351,7 +353,6 @@ static int macvlan_queue_xmit(struct sk_buff *skb, struct net_device *dev)
        }
 
 xmit_world:
-       skb->ip_summed = ip_summed;
        skb->dev = vlan->lowerdev;
        return dev_queue_xmit(skb);
 }
@@ -628,8 +629,7 @@ static void macvlan_uninit(struct net_device *dev)
 
        free_percpu(vlan->pcpu_stats);
 
-       port->count -= 1;
-       if (!port->count)
+       if (MACVLAN_PORT_IS_EMPTY(port))
                macvlan_port_destroy(port->dev);
 }
 
@@ -931,13 +931,12 @@ int macvlan_common_newlink(struct net *src_net, struct net_device *dev,
                vlan->flags = nla_get_u16(data[IFLA_MACVLAN_FLAGS]);
 
        if (vlan->mode == MACVLAN_MODE_PASSTHRU) {
-               if (port->count)
+               if (!MACVLAN_PORT_IS_EMPTY(port))
                        return -EINVAL;
                port->passthru = true;
                eth_hw_addr_inherit(dev, lowerdev);
        }
 
-       port->count += 1;
        err = register_netdevice(dev);
        if (err < 0)
                goto destroy_port;
@@ -955,8 +954,7 @@ int macvlan_common_newlink(struct net *src_net, struct net_device *dev,
 unregister_netdev:
        unregister_netdevice(dev);
 destroy_port:
-       port->count -= 1;
-       if (!port->count)
+       if (MACVLAN_PORT_IS_EMPTY(port))
                macvlan_port_destroy(lowerdev);
 
        return err;
@@ -1091,6 +1089,13 @@ static int macvlan_device_event(struct notifier_block *unused,
                        netdev_update_features(vlan->dev);
                }
                break;
+       case NETDEV_CHANGEMTU:
+               list_for_each_entry(vlan, &port->vlans, list) {
+                       if (vlan->dev->mtu <= dev->mtu)
+                               continue;
+                       dev_set_mtu(vlan->dev, dev->mtu);
+               }
+               break;
        case NETDEV_UNREGISTER:
                /* twiddle thumbs on netns device moves */
                if (dev->reg_state != NETREG_UNREGISTERING)
index ff111a89e17f9c66561d79916d8d57e282c119d2..3381c4f91a8cc236df0df8be59bd586538480498 100644 (file)
@@ -322,6 +322,15 @@ static rx_handler_result_t macvtap_handle_frame(struct sk_buff **pskb)
                        segs = nskb;
                }
        } else {
+               /* If we receive a partial checksum and the tap side
+                * doesn't support checksum offload, compute the checksum.
+                * Note: it doesn't matter which checksum feature to
+                *        check, we either support them all or none.
+                */
+               if (skb->ip_summed == CHECKSUM_PARTIAL &&
+                   !(features & NETIF_F_ALL_CSUM) &&
+                   skb_checksum_help(skb))
+                       goto drop;
                skb_queue_tail(&q->sk.sk_receive_queue, skb);
        }
 
index 63aa9d9e34c52b1c2150fa7876ce3658a7c11fe5..27536aa8919950cc18ca0ac1b7c4965523839032 100644 (file)
@@ -348,7 +348,7 @@ static int ntb_netdev_probe(struct pci_dev *pdev)
        memcpy(ndev->dev_addr, ndev->perm_addr, ndev->addr_len);
 
        ndev->netdev_ops = &ntb_netdev_ops;
-       SET_ETHTOOL_OPS(ndev, &ntb_ethtool_ops);
+       ndev->ethtool_ops = &ntb_ethtool_ops;
 
        dev->qp = ntb_transport_create_queue(ndev, pdev, &ntb_netdev_handlers);
        if (!dev->qp) {
index b256083aa69ebabbed9ae06079835a704b66ce1a..6c622aedbae111842b0e8ec86aede54653b84ec6 100644 (file)
@@ -253,8 +253,7 @@ static int __init atheros_init(void)
 
 static void __exit atheros_exit(void)
 {
-       return phy_drivers_unregister(at803x_driver,
-                                     ARRAY_SIZE(at803x_driver));
+       phy_drivers_unregister(at803x_driver, ARRAY_SIZE(at803x_driver));
 }
 
 module_init(atheros_init);
index ba55adfc7aaef00b7cfee22fe0fd2ff8eb67ffe8..d60d875cb4450ab6cf72114c35b4c816e2e031fd 100644 (file)
@@ -21,6 +21,7 @@
 #include <linux/phy_fixed.h>
 #include <linux/err.h>
 #include <linux/slab.h>
+#include <linux/of.h>
 
 #define MII_REGS_NUM 29
 
@@ -31,7 +32,7 @@ struct fixed_mdio_bus {
 };
 
 struct fixed_phy {
-       int id;
+       int addr;
        u16 regs[MII_REGS_NUM];
        struct phy_device *phydev;
        struct fixed_phy_status status;
@@ -104,8 +105,8 @@ static int fixed_phy_update_regs(struct fixed_phy *fp)
        if (fp->status.asym_pause)
                lpa |= LPA_PAUSE_ASYM;
 
-       fp->regs[MII_PHYSID1] = fp->id >> 16;
-       fp->regs[MII_PHYSID2] = fp->id;
+       fp->regs[MII_PHYSID1] = 0;
+       fp->regs[MII_PHYSID2] = 0;
 
        fp->regs[MII_BMSR] = bmsr;
        fp->regs[MII_BMCR] = bmcr;
@@ -115,7 +116,7 @@ static int fixed_phy_update_regs(struct fixed_phy *fp)
        return 0;
 }
 
-static int fixed_mdio_read(struct mii_bus *bus, int phy_id, int reg_num)
+static int fixed_mdio_read(struct mii_bus *bus, int phy_addr, int reg_num)
 {
        struct fixed_mdio_bus *fmb = bus->priv;
        struct fixed_phy *fp;
@@ -124,7 +125,7 @@ static int fixed_mdio_read(struct mii_bus *bus, int phy_id, int reg_num)
                return -1;
 
        list_for_each_entry(fp, &fmb->phys, node) {
-               if (fp->id == phy_id) {
+               if (fp->addr == phy_addr) {
                        /* Issue callback if user registered it. */
                        if (fp->link_update) {
                                fp->link_update(fp->phydev->attached_dev,
@@ -138,7 +139,7 @@ static int fixed_mdio_read(struct mii_bus *bus, int phy_id, int reg_num)
        return 0xFFFF;
 }
 
-static int fixed_mdio_write(struct mii_bus *bus, int phy_id, int reg_num,
+static int fixed_mdio_write(struct mii_bus *bus, int phy_addr, int reg_num,
                            u16 val)
 {
        return 0;
@@ -160,7 +161,7 @@ int fixed_phy_set_link_update(struct phy_device *phydev,
                return -EINVAL;
 
        list_for_each_entry(fp, &fmb->phys, node) {
-               if (fp->id == phydev->phy_id) {
+               if (fp->addr == phydev->addr) {
                        fp->link_update = link_update;
                        fp->phydev = phydev;
                        return 0;
@@ -171,7 +172,7 @@ int fixed_phy_set_link_update(struct phy_device *phydev,
 }
 EXPORT_SYMBOL_GPL(fixed_phy_set_link_update);
 
-int fixed_phy_add(unsigned int irq, int phy_id,
+int fixed_phy_add(unsigned int irq, int phy_addr,
                  struct fixed_phy_status *status)
 {
        int ret;
@@ -184,9 +185,9 @@ int fixed_phy_add(unsigned int irq, int phy_id,
 
        memset(fp->regs, 0xFF,  sizeof(fp->regs[0]) * MII_REGS_NUM);
 
-       fmb->irqs[phy_id] = irq;
+       fmb->irqs[phy_addr] = irq;
 
-       fp->id = phy_id;
+       fp->addr = phy_addr;
        fp->status = *status;
 
        ret = fixed_phy_update_regs(fp);
@@ -203,6 +204,66 @@ err_regs:
 }
 EXPORT_SYMBOL_GPL(fixed_phy_add);
 
+void fixed_phy_del(int phy_addr)
+{
+       struct fixed_mdio_bus *fmb = &platform_fmb;
+       struct fixed_phy *fp, *tmp;
+
+       list_for_each_entry_safe(fp, tmp, &fmb->phys, node) {
+               if (fp->addr == phy_addr) {
+                       list_del(&fp->node);
+                       kfree(fp);
+                       return;
+               }
+       }
+}
+EXPORT_SYMBOL_GPL(fixed_phy_del);
+
+static int phy_fixed_addr;
+static DEFINE_SPINLOCK(phy_fixed_addr_lock);
+
+int fixed_phy_register(unsigned int irq,
+                      struct fixed_phy_status *status,
+                      struct device_node *np)
+{
+       struct fixed_mdio_bus *fmb = &platform_fmb;
+       struct phy_device *phy;
+       int phy_addr;
+       int ret;
+
+       /* Get the next available PHY address, up to PHY_MAX_ADDR */
+       spin_lock(&phy_fixed_addr_lock);
+       if (phy_fixed_addr == PHY_MAX_ADDR) {
+               spin_unlock(&phy_fixed_addr_lock);
+               return -ENOSPC;
+       }
+       phy_addr = phy_fixed_addr++;
+       spin_unlock(&phy_fixed_addr_lock);
+
+       ret = fixed_phy_add(PHY_POLL, phy_addr, status);
+       if (ret < 0)
+               return ret;
+
+       phy = get_phy_device(fmb->mii_bus, phy_addr, false);
+       if (!phy || IS_ERR(phy)) {
+               fixed_phy_del(phy_addr);
+               return -EINVAL;
+       }
+
+       of_node_get(np);
+       phy->dev.of_node = np;
+
+       ret = phy_device_register(phy);
+       if (ret) {
+               phy_device_free(phy);
+               of_node_put(np);
+               fixed_phy_del(phy_addr);
+               return ret;
+       }
+
+       return 0;
+}
+
 static int __init fixed_mdio_bus_init(void)
 {
        struct fixed_mdio_bus *fmb = &platform_fmb;
index 9c4defdec67b09299f38f1b06bf8eacbccd007d1..5f1a2250018fec5ba01a2a1a1a11f2d736f544a9 100644 (file)
@@ -215,6 +215,10 @@ static int mdio_gpio_probe(struct platform_device *pdev)
        if (pdev->dev.of_node) {
                pdata = mdio_gpio_of_get_data(pdev);
                bus_id = of_alias_get_id(pdev->dev.of_node, "mdio-gpio");
+               if (bus_id < 0) {
+                       dev_warn(&pdev->dev, "failed to get alias id\n");
+                       bus_id = 0;
+               }
        } else {
                pdata = dev_get_platdata(&pdev->dev);
                bus_id = pdev->id;
index 76f54b32a120832f2ce212c129592a6f30ab83df..a6284964b71144055511dd7ecdc91b09ae09b02a 100644 (file)
@@ -69,6 +69,73 @@ struct mii_bus *mdiobus_alloc_size(size_t size)
 }
 EXPORT_SYMBOL(mdiobus_alloc_size);
 
+static void _devm_mdiobus_free(struct device *dev, void *res)
+{
+       mdiobus_free(*(struct mii_bus **)res);
+}
+
+static int devm_mdiobus_match(struct device *dev, void *res, void *data)
+{
+       struct mii_bus **r = res;
+
+       if (WARN_ON(!r || !*r))
+               return 0;
+
+       return *r == data;
+}
+
+/**
+ * devm_mdiobus_alloc_size - Resource-managed mdiobus_alloc_size()
+ * @dev:               Device to allocate mii_bus for
+ * @sizeof_priv:       Space to allocate for private structure.
+ *
+ * Managed mdiobus_alloc_size. mii_bus allocated with this function is
+ * automatically freed on driver detach.
+ *
+ * If an mii_bus allocated with this function needs to be freed separately,
+ * devm_mdiobus_free() must be used.
+ *
+ * RETURNS:
+ * Pointer to allocated mii_bus on success, NULL on failure.
+ */
+struct mii_bus *devm_mdiobus_alloc_size(struct device *dev, int sizeof_priv)
+{
+       struct mii_bus **ptr, *bus;
+
+       ptr = devres_alloc(_devm_mdiobus_free, sizeof(*ptr), GFP_KERNEL);
+       if (!ptr)
+               return NULL;
+
+       /* use raw alloc_dr for kmalloc caller tracing */
+       bus = mdiobus_alloc_size(sizeof_priv);
+       if (bus) {
+               *ptr = bus;
+               devres_add(dev, ptr);
+       } else {
+               devres_free(ptr);
+       }
+
+       return bus;
+}
+EXPORT_SYMBOL_GPL(devm_mdiobus_alloc_size);
+
+/**
+ * devm_mdiobus_free - Resource-managed mdiobus_free()
+ * @dev:               Device this mii_bus belongs to
+ * @bus:               the mii_bus associated with the device
+ *
+ * Free mii_bus allocated with devm_mdiobus_alloc_size().
+ */
+void devm_mdiobus_free(struct device *dev, struct mii_bus *bus)
+{
+       int rc;
+
+       rc = devres_release(dev, _devm_mdiobus_free,
+                           devm_mdiobus_match, bus);
+       WARN_ON(rc);
+}
+EXPORT_SYMBOL_GPL(devm_mdiobus_free);
+
 /**
  * mdiobus_release - mii_bus device release callback
  * @d: the target struct device that contains the mii_bus
index 5ad971a55c5d9f21ffb3ded8e9d5704534095d21..bc7c7d2f75f26e41ccd205eeaa6402eee25e0233 100644 (file)
@@ -246,13 +246,13 @@ static int ksz9021_load_values_from_of(struct phy_device *phydev,
        if (val1 != -1)
                newval = ((newval & 0xfff0) | ((val1 / PS_TO_REG) & 0xf) << 0);
 
-       if (val2 != -1)
+       if (val2 != -2)
                newval = ((newval & 0xff0f) | ((val2 / PS_TO_REG) & 0xf) << 4);
 
-       if (val3 != -1)
+       if (val3 != -3)
                newval = ((newval & 0xf0ff) | ((val3 / PS_TO_REG) & 0xf) << 8);
 
-       if (val4 != -1)
+       if (val4 != -4)
                newval = ((newval & 0x0fff) | ((val4 / PS_TO_REG) & 0xf) << 12);
 
        return kszphy_extended_write(phydev, reg, newval);
@@ -283,6 +283,110 @@ static int ksz9021_config_init(struct phy_device *phydev)
        return 0;
 }
 
+#define MII_KSZ9031RN_MMD_CTRL_REG     0x0d
+#define MII_KSZ9031RN_MMD_REGDATA_REG  0x0e
+#define OP_DATA                                1
+#define KSZ9031_PS_TO_REG              60
+
+/* Extended registers */
+#define MII_KSZ9031RN_CONTROL_PAD_SKEW 4
+#define MII_KSZ9031RN_RX_DATA_PAD_SKEW 5
+#define MII_KSZ9031RN_TX_DATA_PAD_SKEW 6
+#define MII_KSZ9031RN_CLK_PAD_SKEW     8
+
+static int ksz9031_extended_write(struct phy_device *phydev,
+                                 u8 mode, u32 dev_addr, u32 regnum, u16 val)
+{
+       phy_write(phydev, MII_KSZ9031RN_MMD_CTRL_REG, dev_addr);
+       phy_write(phydev, MII_KSZ9031RN_MMD_REGDATA_REG, regnum);
+       phy_write(phydev, MII_KSZ9031RN_MMD_CTRL_REG, (mode << 14) | dev_addr);
+       return phy_write(phydev, MII_KSZ9031RN_MMD_REGDATA_REG, val);
+}
+
+static int ksz9031_extended_read(struct phy_device *phydev,
+                                u8 mode, u32 dev_addr, u32 regnum)
+{
+       phy_write(phydev, MII_KSZ9031RN_MMD_CTRL_REG, dev_addr);
+       phy_write(phydev, MII_KSZ9031RN_MMD_REGDATA_REG, regnum);
+       phy_write(phydev, MII_KSZ9031RN_MMD_CTRL_REG, (mode << 14) | dev_addr);
+       return phy_read(phydev, MII_KSZ9031RN_MMD_REGDATA_REG);
+}
+
+static int ksz9031_of_load_skew_values(struct phy_device *phydev,
+                                      struct device_node *of_node,
+                                      u16 reg, size_t field_sz,
+                                      char *field[], u8 numfields)
+{
+       int val[4] = {-1, -2, -3, -4};
+       int matches = 0;
+       u16 mask;
+       u16 maxval;
+       u16 newval;
+       int i;
+
+       for (i = 0; i < numfields; i++)
+               if (!of_property_read_u32(of_node, field[i], val + i))
+                       matches++;
+
+       if (!matches)
+               return 0;
+
+       if (matches < numfields)
+               newval = ksz9031_extended_read(phydev, OP_DATA, 2, reg);
+       else
+               newval = 0;
+
+       maxval = (field_sz == 4) ? 0xf : 0x1f;
+       for (i = 0; i < numfields; i++)
+               if (val[i] != -(i + 1)) {
+                       mask = 0xffff;
+                       mask ^= maxval << (field_sz * i);
+                       newval = (newval & mask) |
+                               (((val[i] / KSZ9031_PS_TO_REG) & maxval)
+                                       << (field_sz * i));
+               }
+
+       return ksz9031_extended_write(phydev, OP_DATA, 2, reg, newval);
+}
+
+static int ksz9031_config_init(struct phy_device *phydev)
+{
+       struct device *dev = &phydev->dev;
+       struct device_node *of_node = dev->of_node;
+       char *clk_skews[2] = {"rxc-skew-ps", "txc-skew-ps"};
+       char *rx_data_skews[4] = {
+               "rxd0-skew-ps", "rxd1-skew-ps",
+               "rxd2-skew-ps", "rxd3-skew-ps"
+       };
+       char *tx_data_skews[4] = {
+               "txd0-skew-ps", "txd1-skew-ps",
+               "txd2-skew-ps", "txd3-skew-ps"
+       };
+       char *control_skews[2] = {"txen-skew-ps", "rxdv-skew-ps"};
+
+       if (!of_node && dev->parent->of_node)
+               of_node = dev->parent->of_node;
+
+       if (of_node) {
+               ksz9031_of_load_skew_values(phydev, of_node,
+                               MII_KSZ9031RN_CLK_PAD_SKEW, 5,
+                               clk_skews, 2);
+
+               ksz9031_of_load_skew_values(phydev, of_node,
+                               MII_KSZ9031RN_CONTROL_PAD_SKEW, 4,
+                               control_skews, 2);
+
+               ksz9031_of_load_skew_values(phydev, of_node,
+                               MII_KSZ9031RN_RX_DATA_PAD_SKEW, 4,
+                               rx_data_skews, 4);
+
+               ksz9031_of_load_skew_values(phydev, of_node,
+                               MII_KSZ9031RN_TX_DATA_PAD_SKEW, 4,
+                               tx_data_skews, 4);
+       }
+       return 0;
+}
+
 #define KSZ8873MLL_GLOBAL_CONTROL_4    0x06
 #define KSZ8873MLL_GLOBAL_CONTROL_4_DUPLEX     (1 << 6)
 #define KSZ8873MLL_GLOBAL_CONTROL_4_SPEED      (1 << 4)
@@ -469,7 +573,7 @@ static struct phy_driver ksphy_driver[] = {
        .features       = (PHY_GBIT_FEATURES | SUPPORTED_Pause
                                | SUPPORTED_Asym_Pause),
        .flags          = PHY_HAS_MAGICANEG | PHY_HAS_INTERRUPT,
-       .config_init    = kszphy_config_init,
+       .config_init    = ksz9031_config_init,
        .config_aneg    = genphy_config_aneg,
        .read_status    = genphy_read_status,
        .ack_interrupt  = kszphy_ack_interrupt,
index 1b6d09aef42748bcbba6d4fe88ca68d6ea83c852..a972056b22498c15596a88f6b348e0f066ddb85b 100644 (file)
@@ -765,6 +765,17 @@ void phy_state_machine(struct work_struct *work)
                        break;
 
                if (phydev->link) {
+                       if (AUTONEG_ENABLE == phydev->autoneg) {
+                               err = phy_aneg_done(phydev);
+                               if (err < 0)
+                                       break;
+
+                               if (!err) {
+                                       phydev->state = PHY_AN;
+                                       phydev->link_timeout = PHY_AN_TIMEOUT;
+                                       break;
+                               }
+                       }
                        phydev->state = PHY_RUNNING;
                        netif_carrier_on(phydev->attached_dev);
                        phydev->adjust_link(phydev->attached_dev);
index 466ae3e063220179580c48d351ceaa0eac5ee615..eb3b946bd8c00f3294d60d3d548cefe552f9ab1e 100644 (file)
@@ -33,6 +33,7 @@
 #include <linux/mdio.h>
 #include <linux/io.h>
 #include <linux/uaccess.h>
+#include <linux/of.h>
 
 #include <asm/irq.h>
 
@@ -1072,9 +1073,6 @@ int genphy_config_init(struct phy_device *phydev)
        int val;
        u32 features;
 
-       /* For now, I'll claim that the generic driver supports
-        * all possible port types
-        */
        features = (SUPPORTED_TP | SUPPORTED_MII
                        | SUPPORTED_AUI | SUPPORTED_FIBRE |
                        SUPPORTED_BNC);
@@ -1107,8 +1105,8 @@ int genphy_config_init(struct phy_device *phydev)
                        features |= SUPPORTED_1000baseT_Half;
        }
 
-       phydev->supported = features;
-       phydev->advertising = features;
+       phydev->supported &= features;
+       phydev->advertising &= features;
 
        return 0;
 }
@@ -1169,6 +1167,38 @@ static int gen10g_resume(struct phy_device *phydev)
        return 0;
 }
 
+static void of_set_phy_supported(struct phy_device *phydev)
+{
+       struct device_node *node = phydev->dev.of_node;
+       u32 max_speed;
+
+       if (!IS_ENABLED(CONFIG_OF_MDIO))
+               return;
+
+       if (!node)
+               return;
+
+       if (!of_property_read_u32(node, "max-speed", &max_speed)) {
+               /* The default values for phydev->supported are provided by the PHY
+                * driver "features" member, we want to reset to sane defaults fist
+                * before supporting higher speeds.
+                */
+               phydev->supported &= PHY_DEFAULT_FEATURES;
+
+               switch (max_speed) {
+               default:
+                       return;
+
+               case SPEED_1000:
+                       phydev->supported |= PHY_1000BT_FEATURES;
+               case SPEED_100:
+                       phydev->supported |= PHY_100BT_FEATURES;
+               case SPEED_10:
+                       phydev->supported |= PHY_10BT_FEATURES;
+               }
+       }
+}
+
 /**
  * phy_probe - probe and init a PHY device
  * @dev: device to probe and init
@@ -1203,7 +1233,8 @@ static int phy_probe(struct device *dev)
         * or both of these values
         */
        phydev->supported = phydrv->features;
-       phydev->advertising = phydrv->features;
+       of_set_phy_supported(phydev);
+       phydev->advertising = phydev->supported;
 
        /* Set the state to READY by default */
        phydev->state = PHY_READY;
@@ -1296,7 +1327,9 @@ static struct phy_driver genphy_driver[] = {
        .name           = "Generic PHY",
        .soft_reset     = genphy_soft_reset,
        .config_init    = genphy_config_init,
-       .features       = 0,
+       .features       = PHY_GBIT_FEATURES | SUPPORTED_MII |
+                         SUPPORTED_AUI | SUPPORTED_FIBRE |
+                         SUPPORTED_BNC,
        .config_aneg    = genphy_config_aneg,
        .aneg_done      = genphy_aneg_done,
        .read_status    = genphy_read_status,
index 11f34813e23fb5423ccf90cf9d25e208ed2275f3..180c49479c42f9b4a19f070056b782923de5084c 100644 (file)
@@ -249,8 +249,7 @@ static int __init smsc_init(void)
 
 static void __exit smsc_exit(void)
 {
-       return phy_drivers_unregister(smsc_phy_driver,
-               ARRAY_SIZE(smsc_phy_driver));
+       phy_drivers_unregister(smsc_phy_driver, ARRAY_SIZE(smsc_phy_driver));
 }
 
 MODULE_DESCRIPTION("SMSC PHY driver");
index 14372c65a7e8209b5f97da6416ef80d1f41a522c..5dc0935da99c52a07ba7c09da27503b8ee0b2695 100644 (file)
@@ -319,8 +319,7 @@ static int __init vsc82xx_init(void)
 
 static void __exit vsc82xx_exit(void)
 {
-       return phy_drivers_unregister(vsc82xx_driver,
-               ARRAY_SIZE(vsc82xx_driver));
+       phy_drivers_unregister(vsc82xx_driver, ARRAY_SIZE(vsc82xx_driver));
 }
 
 module_init(vsc82xx_init);
index a8497183ff8b079985b7930fd5eb26dc9fb44813..dac7a0d9bb46e5d9d2385250f990a2a0acf5996c 100644 (file)
@@ -494,7 +494,7 @@ static int rionet_setup_netdev(struct rio_mport *mport, struct net_device *ndev)
        ndev->mtu = RIO_MAX_MSG_SIZE - 14;
        ndev->features = NETIF_F_LLTX;
        SET_NETDEV_DEV(ndev, &mport->dev);
-       SET_ETHTOOL_OPS(ndev, &rionet_ethtool_ops);
+       ndev->ethtool_ops = &rionet_ethtool_ops;
 
        spin_lock_init(&rnet->lock);
        spin_lock_init(&rnet->tx_lock);
index cc70ecfc70626789183e462c8b51d13f0c7fc8aa..ad4a94e9ff57c77574820fe3e188b12986feff55 100644 (file)
@@ -429,13 +429,13 @@ static void slip_write_wakeup(struct tty_struct *tty)
        if (!sl || sl->magic != SLIP_MAGIC || !netif_running(sl->dev))
                return;
 
-       spin_lock(&sl->lock);
+       spin_lock_bh(&sl->lock);
        if (sl->xleft <= 0)  {
                /* Now serial buffer is almost free & we can start
                 * transmission of another packet */
                sl->dev->stats.tx_packets++;
                clear_bit(TTY_DO_WRITE_WAKEUP, &tty->flags);
-               spin_unlock(&sl->lock);
+               spin_unlock_bh(&sl->lock);
                sl_unlock(sl);
                return;
        }
@@ -443,7 +443,7 @@ static void slip_write_wakeup(struct tty_struct *tty)
        actual = tty->ops->write(tty, sl->xhead, sl->xleft);
        sl->xleft -= actual;
        sl->xhead += actual;
-       spin_unlock(&sl->lock);
+       spin_unlock_bh(&sl->lock);
 }
 
 static void sl_tx_timeout(struct net_device *dev)
index 33008c1d1d678756ae8fbae13238763c24cc603e..9a9ce8debefaaa5c70ac045f0ed7ab11034644cb 100644 (file)
@@ -968,7 +968,7 @@ static void team_port_disable(struct team *team,
 static void __team_compute_features(struct team *team)
 {
        struct team_port *port;
-       u32 vlan_features = TEAM_VLAN_FEATURES;
+       u32 vlan_features = TEAM_VLAN_FEATURES & NETIF_F_ALL_FOR_ALL;
        unsigned short max_hard_header_len = ETH_HLEN;
        unsigned int flags, dst_release_flag = IFF_XMIT_DST_RELEASE;
 
@@ -2834,8 +2834,10 @@ static int team_device_event(struct notifier_block *unused,
        case NETDEV_UP:
                if (netif_carrier_ok(dev))
                        team_port_change_check(port, true);
+               break;
        case NETDEV_DOWN:
                team_port_change_check(port, false);
+               break;
        case NETDEV_CHANGE:
                if (netif_running(port->dev))
                        team_port_change_check(port,
index ee328ba101e72a9e3f150d8c24068182be86abc5..98bad1fb1bfb1ce66ea4219c2e767256f05d6cbb 100644 (file)
@@ -498,12 +498,12 @@ static void tun_detach_all(struct net_device *dev)
        for (i = 0; i < n; i++) {
                tfile = rtnl_dereference(tun->tfiles[i]);
                BUG_ON(!tfile);
-               wake_up_all(&tfile->wq.wait);
+               tfile->socket.sk->sk_data_ready(tfile->socket.sk);
                RCU_INIT_POINTER(tfile->tun, NULL);
                --tun->numqueues;
        }
        list_for_each_entry(tfile, &tun->disabled, next) {
-               wake_up_all(&tfile->wq.wait);
+               tfile->socket.sk->sk_data_ready(tfile->socket.sk);
                RCU_INIT_POINTER(tfile->tun, NULL);
        }
        BUG_ON(tun->numqueues != 0);
@@ -807,8 +807,7 @@ static netdev_tx_t tun_net_xmit(struct sk_buff *skb, struct net_device *dev)
        /* Notify and wake up reader process */
        if (tfile->flags & TUN_FASYNC)
                kill_fasync(&tfile->fasync, SIGIO, POLL_IN);
-       wake_up_interruptible_poll(&tfile->wq.wait, POLLIN |
-                                  POLLRDNORM | POLLRDBAND);
+       tfile->socket.sk->sk_data_ready(tfile->socket.sk);
 
        rcu_read_unlock();
        return NETDEV_TX_OK;
@@ -965,7 +964,7 @@ static unsigned int tun_chr_poll(struct file *file, poll_table *wait)
 
        tun_debug(KERN_INFO, tun, "tun_chr_poll\n");
 
-       poll_wait(file, &tfile->wq.wait, wait);
+       poll_wait(file, sk_sleep(sk), wait);
 
        if (!skb_queue_empty(&sk->sk_receive_queue))
                mask |= POLLIN | POLLRDNORM;
@@ -1330,47 +1329,26 @@ done:
 static ssize_t tun_do_read(struct tun_struct *tun, struct tun_file *tfile,
                           const struct iovec *iv, ssize_t len, int noblock)
 {
-       DECLARE_WAITQUEUE(wait, current);
        struct sk_buff *skb;
        ssize_t ret = 0;
+       int peeked, err, off = 0;
 
        tun_debug(KERN_INFO, tun, "tun_do_read\n");
 
-       if (unlikely(!noblock))
-               add_wait_queue(&tfile->wq.wait, &wait);
-       while (len) {
-               if (unlikely(!noblock))
-                       current->state = TASK_INTERRUPTIBLE;
+       if (!len)
+               return ret;
 
-               /* Read frames from the queue */
-               if (!(skb = skb_dequeue(&tfile->socket.sk->sk_receive_queue))) {
-                       if (noblock) {
-                               ret = -EAGAIN;
-                               break;
-                       }
-                       if (signal_pending(current)) {
-                               ret = -ERESTARTSYS;
-                               break;
-                       }
-                       if (tun->dev->reg_state != NETREG_REGISTERED) {
-                               ret = -EIO;
-                               break;
-                       }
-
-                       /* Nothing to read, let's sleep */
-                       schedule();
-                       continue;
-               }
+       if (tun->dev->reg_state != NETREG_REGISTERED)
+               return -EIO;
 
+       /* Read frames from queue */
+       skb = __skb_recv_datagram(tfile->socket.sk, noblock ? MSG_DONTWAIT : 0,
+                                 &peeked, &off, &err);
+       if (skb) {
                ret = tun_put_user(tun, tfile, skb, iv, len);
                kfree_skb(skb);
-               break;
-       }
-
-       if (unlikely(!noblock)) {
-               current->state = TASK_RUNNING;
-               remove_wait_queue(&tfile->wq.wait, &wait);
-       }
+       } else
+               ret = err;
 
        return ret;
 }
@@ -2199,8 +2177,8 @@ static int tun_chr_open(struct inode *inode, struct file * file)
        tfile->flags = 0;
        tfile->ifindex = 0;
 
-       rcu_assign_pointer(tfile->socket.wq, &tfile->wq);
        init_waitqueue_head(&tfile->wq.wait);
+       RCU_INIT_POINTER(tfile->socket.wq, &tfile->wq);
 
        tfile->socket.file = file;
        tfile->socket.ops = &tun_socket_ops;
index 630caf48f63aab7c2023d4cadb7044d23a2af86d..8cfc3bb0c6a672a288784ab0dd5f09597265c39d 100644 (file)
@@ -793,7 +793,7 @@ static int catc_probe(struct usb_interface *intf, const struct usb_device_id *id
 
        netdev->netdev_ops = &catc_netdev_ops;
        netdev->watchdog_timeo = TX_TIMEOUT;
-       SET_ETHTOOL_OPS(netdev, &ops);
+       netdev->ethtool_ops = &ops;
 
        catc->usbdev = usbdev;
        catc->netdev = netdev;
index c9f3281506af568e534a47789418b466a0a77adc..5ee7a1dbc023833a5907a6db48695600bb728fd9 100644 (file)
 #include <net/ipv6.h>
 #include <net/addrconf.h>
 
+/* alternative VLAN for IP session 0 if not untagged */
+#define MBIM_IPS0_VID  4094
+
 /* driver specific data - must match cdc_ncm usage */
 struct cdc_mbim_state {
        struct cdc_ncm_ctx *ctx;
        atomic_t pmcount;
        struct usb_driver *subdriver;
-       struct usb_interface *control;
-       struct usb_interface *data;
+       unsigned long _unused;
+       unsigned long flags;
+};
+
+/* flags for the cdc_mbim_state.flags field */
+enum cdc_mbim_flags {
+       FLAG_IPS0_VLAN = 1 << 0,        /* IP session 0 is tagged  */
 };
 
 /* using a counter to merge subdriver requests with our own into a combined state */
@@ -62,16 +70,91 @@ static int cdc_mbim_wdm_manage_power(struct usb_interface *intf, int status)
        return cdc_mbim_manage_power(dev, status);
 }
 
+static int cdc_mbim_rx_add_vid(struct net_device *netdev, __be16 proto, u16 vid)
+{
+       struct usbnet *dev = netdev_priv(netdev);
+       struct cdc_mbim_state *info = (void *)&dev->data;
+
+       /* creation of this VLAN is a request to tag IP session 0 */
+       if (vid == MBIM_IPS0_VID)
+               info->flags |= FLAG_IPS0_VLAN;
+       else
+               if (vid >= 512) /* we don't map these to MBIM session */
+                       return -EINVAL;
+       return 0;
+}
+
+static int cdc_mbim_rx_kill_vid(struct net_device *netdev, __be16 proto, u16 vid)
+{
+       struct usbnet *dev = netdev_priv(netdev);
+       struct cdc_mbim_state *info = (void *)&dev->data;
+
+       /* this is a request for an untagged IP session 0 */
+       if (vid == MBIM_IPS0_VID)
+               info->flags &= ~FLAG_IPS0_VLAN;
+       return 0;
+}
+
+static const struct net_device_ops cdc_mbim_netdev_ops = {
+       .ndo_open             = usbnet_open,
+       .ndo_stop             = usbnet_stop,
+       .ndo_start_xmit       = usbnet_start_xmit,
+       .ndo_tx_timeout       = usbnet_tx_timeout,
+       .ndo_change_mtu       = usbnet_change_mtu,
+       .ndo_set_mac_address  = eth_mac_addr,
+       .ndo_validate_addr    = eth_validate_addr,
+       .ndo_vlan_rx_add_vid  = cdc_mbim_rx_add_vid,
+       .ndo_vlan_rx_kill_vid = cdc_mbim_rx_kill_vid,
+};
+
+/* Change the control interface altsetting and update the .driver_info
+ * pointer if the matching entry after changing class codes points to
+ * a different struct
+ */
+static int cdc_mbim_set_ctrlalt(struct usbnet *dev, struct usb_interface *intf, u8 alt)
+{
+       struct usb_driver *driver = to_usb_driver(intf->dev.driver);
+       const struct usb_device_id *id;
+       struct driver_info *info;
+       int ret;
+
+       ret = usb_set_interface(dev->udev,
+                               intf->cur_altsetting->desc.bInterfaceNumber,
+                               alt);
+       if (ret)
+               return ret;
+
+       id = usb_match_id(intf, driver->id_table);
+       if (!id)
+               return -ENODEV;
+
+       info = (struct driver_info *)id->driver_info;
+       if (info != dev->driver_info) {
+               dev_dbg(&intf->dev, "driver_info updated to '%s'\n",
+                       info->description);
+               dev->driver_info = info;
+       }
+       return 0;
+}
 
 static int cdc_mbim_bind(struct usbnet *dev, struct usb_interface *intf)
 {
        struct cdc_ncm_ctx *ctx;
        struct usb_driver *subdriver = ERR_PTR(-ENODEV);
        int ret = -ENODEV;
-       u8 data_altsetting = cdc_ncm_select_altsetting(dev, intf);
+       u8 data_altsetting = 1;
        struct cdc_mbim_state *info = (void *)&dev->data;
 
-       /* Probably NCM, defer for cdc_ncm_bind */
+       /* should we change control altsetting on a NCM/MBIM function? */
+       if (cdc_ncm_select_altsetting(intf) == CDC_NCM_COMM_ALTSETTING_MBIM) {
+               data_altsetting = CDC_NCM_DATA_ALTSETTING_MBIM;
+               ret = cdc_mbim_set_ctrlalt(dev, intf, CDC_NCM_COMM_ALTSETTING_MBIM);
+               if (ret)
+                       goto err;
+               ret = -ENODEV;
+       }
+
+       /* we will hit this for NCM/MBIM functions if prefer_mbim is false */
        if (!cdc_ncm_comm_intf_is_mbim(intf->cur_altsetting))
                goto err;
 
@@ -101,7 +184,10 @@ static int cdc_mbim_bind(struct usbnet *dev, struct usb_interface *intf)
        dev->net->flags |= IFF_NOARP;
 
        /* no need to put the VLAN tci in the packet headers */
-       dev->net->features |= NETIF_F_HW_VLAN_CTAG_TX;
+       dev->net->features |= NETIF_F_HW_VLAN_CTAG_TX | NETIF_F_HW_VLAN_CTAG_FILTER;
+
+       /* monitor VLAN additions and removals */
+       dev->net->netdev_ops = &cdc_mbim_netdev_ops;
 err:
        return ret;
 }
@@ -120,6 +206,16 @@ static void cdc_mbim_unbind(struct usbnet *dev, struct usb_interface *intf)
        cdc_ncm_unbind(dev, intf);
 }
 
+/* verify that the ethernet protocol is IPv4 or IPv6 */
+static bool is_ip_proto(__be16 proto)
+{
+       switch (proto) {
+       case htons(ETH_P_IP):
+       case htons(ETH_P_IPV6):
+               return true;
+       }
+       return false;
+}
 
 static struct sk_buff *cdc_mbim_tx_fixup(struct usbnet *dev, struct sk_buff *skb, gfp_t flags)
 {
@@ -128,6 +224,7 @@ static struct sk_buff *cdc_mbim_tx_fixup(struct usbnet *dev, struct sk_buff *skb
        struct cdc_ncm_ctx *ctx = info->ctx;
        __le32 sign = cpu_to_le32(USB_CDC_MBIM_NDP16_IPS_SIGN);
        u16 tci = 0;
+       bool is_ip;
        u8 *c;
 
        if (!ctx)
@@ -137,29 +234,50 @@ static struct sk_buff *cdc_mbim_tx_fixup(struct usbnet *dev, struct sk_buff *skb
                if (skb->len <= ETH_HLEN)
                        goto error;
 
+               /* Some applications using e.g. packet sockets will
+                * bypass the VLAN acceleration and create tagged
+                * ethernet frames directly.  We primarily look for
+                * the accelerated out-of-band tag, but fall back if
+                * required
+                */
+               skb_reset_mac_header(skb);
+               if (vlan_get_tag(skb, &tci) < 0 && skb->len > VLAN_ETH_HLEN &&
+                   __vlan_get_tag(skb, &tci) == 0) {
+                       is_ip = is_ip_proto(vlan_eth_hdr(skb)->h_vlan_encapsulated_proto);
+                       skb_pull(skb, VLAN_ETH_HLEN);
+               } else {
+                       is_ip = is_ip_proto(eth_hdr(skb)->h_proto);
+                       skb_pull(skb, ETH_HLEN);
+               }
+
+               /* Is IP session <0> tagged too? */
+               if (info->flags & FLAG_IPS0_VLAN) {
+                       /* drop all untagged packets */
+                       if (!tci)
+                               goto error;
+                       /* map MBIM_IPS0_VID to IPS<0> */
+                       if (tci == MBIM_IPS0_VID)
+                               tci = 0;
+               }
+
                /* mapping VLANs to MBIM sessions:
-                *   no tag     => IPS session <0>
+                *   no tag     => IPS session <0> if !FLAG_IPS0_VLAN
                 *   1 - 255    => IPS session <vlanid>
                 *   256 - 511  => DSS session <vlanid - 256>
-                *   512 - 4095 => unsupported, drop
+                *   512 - 4093 => unsupported, drop
+                *   4094       => IPS session <0> if FLAG_IPS0_VLAN
                 */
-               vlan_get_tag(skb, &tci);
 
                switch (tci & 0x0f00) {
                case 0x0000: /* VLAN ID 0 - 255 */
-                       /* verify that datagram is IPv4 or IPv6 */
-                       skb_reset_mac_header(skb);
-                       switch (eth_hdr(skb)->h_proto) {
-                       case htons(ETH_P_IP):
-                       case htons(ETH_P_IPV6):
-                               break;
-                       default:
+                       if (!is_ip)
                                goto error;
-                       }
                        c = (u8 *)&sign;
                        c[3] = tci;
                        break;
                case 0x0100: /* VLAN ID 256 - 511 */
+                       if (is_ip)
+                               goto error;
                        sign = cpu_to_le32(USB_CDC_MBIM_NDP16_DSS_SIGN);
                        c = (u8 *)&sign;
                        c[3] = tci;
@@ -169,7 +287,6 @@ static struct sk_buff *cdc_mbim_tx_fixup(struct usbnet *dev, struct sk_buff *skb
                                  "unsupported tci=0x%04x\n", tci);
                        goto error;
                }
-               skb_pull(skb, ETH_HLEN);
        }
 
        spin_lock_bh(&ctx->mtx);
@@ -204,17 +321,23 @@ static void do_neigh_solicit(struct usbnet *dev, u8 *buf, u16 tci)
                return;
 
        /* need to send the NA on the VLAN dev, if any */
-       if (tci)
-               netdev = __vlan_find_dev_deep(dev->net, htons(ETH_P_8021Q),
-                                             tci);
-       else
+       rcu_read_lock();
+       if (tci) {
+               netdev = __vlan_find_dev_deep_rcu(dev->net, htons(ETH_P_8021Q),
+                                                 tci);
+               if (!netdev) {
+                       rcu_read_unlock();
+                       return;
+               }
+       } else {
                netdev = dev->net;
-       if (!netdev)
-               return;
+       }
+       dev_hold(netdev);
+       rcu_read_unlock();
 
        in6_dev = in6_dev_get(netdev);
        if (!in6_dev)
-               return;
+               goto out;
        is_router = !!in6_dev->cnf.forwarding;
        in6_dev_put(in6_dev);
 
@@ -224,6 +347,8 @@ static void do_neigh_solicit(struct usbnet *dev, u8 *buf, u16 tci)
                                 true /* solicited */,
                                 false /* override */,
                                 true /* inc_opt */);
+out:
+       dev_put(netdev);
 }
 
 static bool is_neigh_solicit(u8 *buf, size_t len)
@@ -243,7 +368,7 @@ static struct sk_buff *cdc_mbim_process_dgram(struct usbnet *dev, u8 *buf, size_
        __be16 proto = htons(ETH_P_802_3);
        struct sk_buff *skb = NULL;
 
-       if (tci < 256) { /* IPS session? */
+       if (tci < 256 || tci == MBIM_IPS0_VID) { /* IPS session? */
                if (len < sizeof(struct iphdr))
                        goto err;
 
@@ -295,6 +420,7 @@ static int cdc_mbim_rx_fixup(struct usbnet *dev, struct sk_buff *skb_in)
        struct usb_cdc_ncm_dpe16 *dpe16;
        int ndpoffset;
        int loopcount = 50; /* arbitrary max preventing infinite loop */
+       u32 payload = 0;
        u8 *c;
        u16 tci;
 
@@ -313,6 +439,9 @@ next_ndp:
        case cpu_to_le32(USB_CDC_MBIM_NDP16_IPS_SIGN):
                c = (u8 *)&ndp16->dwSignature;
                tci = c[3];
+               /* tag IPS<0> packets too if MBIM_IPS0_VID exists */
+               if (!tci && info->flags & FLAG_IPS0_VLAN)
+                       tci = MBIM_IPS0_VID;
                break;
        case cpu_to_le32(USB_CDC_MBIM_NDP16_DSS_SIGN):
                c = (u8 *)&ndp16->dwSignature;
@@ -354,6 +483,7 @@ next_ndp:
                        if (!skb)
                                goto error;
                        usbnet_skb_return(dev, skb);
+                       payload += len; /* count payload bytes in this NTB */
                }
        }
 err_ndp:
@@ -362,6 +492,10 @@ err_ndp:
        if (ndpoffset && loopcount--)
                goto next_ndp;
 
+       /* update stats */
+       ctx->rx_overhead += skb_in->len - payload;
+       ctx->rx_ntbs++;
+
        return 1;
 error:
        return 0;
index 549dbac710ed5f576f84cedf375df8588e5a7dc5..93c9ca9924ebe3c29078a7ffe269ba490a4f6dbc 100644 (file)
@@ -65,19 +65,270 @@ static void cdc_ncm_tx_timeout_start(struct cdc_ncm_ctx *ctx);
 static enum hrtimer_restart cdc_ncm_tx_timer_cb(struct hrtimer *hr_timer);
 static struct usb_driver cdc_ncm_driver;
 
-static int cdc_ncm_setup(struct usbnet *dev)
+struct cdc_ncm_stats {
+       char stat_string[ETH_GSTRING_LEN];
+       int sizeof_stat;
+       int stat_offset;
+};
+
+#define CDC_NCM_STAT(str, m) { \
+               .stat_string = str, \
+               .sizeof_stat = sizeof(((struct cdc_ncm_ctx *)0)->m), \
+               .stat_offset = offsetof(struct cdc_ncm_ctx, m) }
+#define CDC_NCM_SIMPLE_STAT(m) CDC_NCM_STAT(__stringify(m), m)
+
+static const struct cdc_ncm_stats cdc_ncm_gstrings_stats[] = {
+       CDC_NCM_SIMPLE_STAT(tx_reason_ntb_full),
+       CDC_NCM_SIMPLE_STAT(tx_reason_ndp_full),
+       CDC_NCM_SIMPLE_STAT(tx_reason_timeout),
+       CDC_NCM_SIMPLE_STAT(tx_reason_max_datagram),
+       CDC_NCM_SIMPLE_STAT(tx_overhead),
+       CDC_NCM_SIMPLE_STAT(tx_ntbs),
+       CDC_NCM_SIMPLE_STAT(rx_overhead),
+       CDC_NCM_SIMPLE_STAT(rx_ntbs),
+};
+
+static int cdc_ncm_get_sset_count(struct net_device __always_unused *netdev, int sset)
+{
+       switch (sset) {
+       case ETH_SS_STATS:
+               return ARRAY_SIZE(cdc_ncm_gstrings_stats);
+       default:
+               return -EOPNOTSUPP;
+       }
+}
+
+static void cdc_ncm_get_ethtool_stats(struct net_device *netdev,
+                                   struct ethtool_stats __always_unused *stats,
+                                   u64 *data)
 {
+       struct usbnet *dev = netdev_priv(netdev);
        struct cdc_ncm_ctx *ctx = (struct cdc_ncm_ctx *)dev->data[0];
-       u32 val;
-       u8 flags;
-       u8 iface_no;
-       int err;
-       int eth_hlen;
-       u16 mbim_mtu;
-       u16 ntb_fmt_supported;
-       __le16 max_datagram_size;
+       int i;
+       char *p = NULL;
 
-       iface_no = ctx->control->cur_altsetting->desc.bInterfaceNumber;
+       for (i = 0; i < ARRAY_SIZE(cdc_ncm_gstrings_stats); i++) {
+               p = (char *)ctx + cdc_ncm_gstrings_stats[i].stat_offset;
+               data[i] = (cdc_ncm_gstrings_stats[i].sizeof_stat == sizeof(u64)) ? *(u64 *)p : *(u32 *)p;
+       }
+}
+
+static void cdc_ncm_get_strings(struct net_device __always_unused *netdev, u32 stringset, u8 *data)
+{
+       u8 *p = data;
+       int i;
+
+       switch (stringset) {
+       case ETH_SS_STATS:
+               for (i = 0; i < ARRAY_SIZE(cdc_ncm_gstrings_stats); i++) {
+                       memcpy(p, cdc_ncm_gstrings_stats[i].stat_string, ETH_GSTRING_LEN);
+                       p += ETH_GSTRING_LEN;
+               }
+       }
+}
+
+static int cdc_ncm_get_coalesce(struct net_device *netdev,
+                               struct ethtool_coalesce *ec)
+{
+       struct usbnet *dev = netdev_priv(netdev);
+       struct cdc_ncm_ctx *ctx = (struct cdc_ncm_ctx *)dev->data[0];
+
+       /* assuming maximum sized dgrams and ignoring NDPs */
+       ec->rx_max_coalesced_frames = ctx->rx_max / ctx->max_datagram_size;
+       ec->tx_max_coalesced_frames = ctx->tx_max / ctx->max_datagram_size;
+
+       /* the timer will fire CDC_NCM_TIMER_PENDING_CNT times in a row */
+       ec->tx_coalesce_usecs = ctx->timer_interval / (NSEC_PER_USEC / CDC_NCM_TIMER_PENDING_CNT);
+       return 0;
+}
+
+static void cdc_ncm_update_rxtx_max(struct usbnet *dev, u32 new_rx, u32 new_tx);
+
+static int cdc_ncm_set_coalesce(struct net_device *netdev,
+                               struct ethtool_coalesce *ec)
+{
+       struct usbnet *dev = netdev_priv(netdev);
+       struct cdc_ncm_ctx *ctx = (struct cdc_ncm_ctx *)dev->data[0];
+       u32 new_rx_max = ctx->rx_max;
+       u32 new_tx_max = ctx->tx_max;
+
+       /* assuming maximum sized dgrams and a single NDP */
+       if (ec->rx_max_coalesced_frames)
+               new_rx_max = ec->rx_max_coalesced_frames * ctx->max_datagram_size;
+       if (ec->tx_max_coalesced_frames)
+               new_tx_max = ec->tx_max_coalesced_frames * ctx->max_datagram_size;
+
+       if (ec->tx_coalesce_usecs &&
+           (ec->tx_coalesce_usecs < CDC_NCM_TIMER_INTERVAL_MIN * CDC_NCM_TIMER_PENDING_CNT ||
+            ec->tx_coalesce_usecs > CDC_NCM_TIMER_INTERVAL_MAX * CDC_NCM_TIMER_PENDING_CNT))
+               return -EINVAL;
+
+       spin_lock_bh(&ctx->mtx);
+       ctx->timer_interval = ec->tx_coalesce_usecs * (NSEC_PER_USEC / CDC_NCM_TIMER_PENDING_CNT);
+       if (!ctx->timer_interval)
+               ctx->tx_timer_pending = 0;
+       spin_unlock_bh(&ctx->mtx);
+
+       /* inform device of new values */
+       if (new_rx_max != ctx->rx_max || new_tx_max != ctx->tx_max)
+               cdc_ncm_update_rxtx_max(dev, new_rx_max, new_tx_max);
+       return 0;
+}
+
+static const struct ethtool_ops cdc_ncm_ethtool_ops = {
+       .get_settings      = usbnet_get_settings,
+       .set_settings      = usbnet_set_settings,
+       .get_link          = usbnet_get_link,
+       .nway_reset        = usbnet_nway_reset,
+       .get_drvinfo       = usbnet_get_drvinfo,
+       .get_msglevel      = usbnet_get_msglevel,
+       .set_msglevel      = usbnet_set_msglevel,
+       .get_ts_info       = ethtool_op_get_ts_info,
+       .get_sset_count    = cdc_ncm_get_sset_count,
+       .get_strings       = cdc_ncm_get_strings,
+       .get_ethtool_stats = cdc_ncm_get_ethtool_stats,
+       .get_coalesce      = cdc_ncm_get_coalesce,
+       .set_coalesce      = cdc_ncm_set_coalesce,
+};
+
+/* handle rx_max and tx_max changes */
+static void cdc_ncm_update_rxtx_max(struct usbnet *dev, u32 new_rx, u32 new_tx)
+{
+       struct cdc_ncm_ctx *ctx = (struct cdc_ncm_ctx *)dev->data[0];
+       u8 iface_no = ctx->control->cur_altsetting->desc.bInterfaceNumber;
+       u32 val, max, min;
+
+       /* clamp new_rx to sane values */
+       min = USB_CDC_NCM_NTB_MIN_IN_SIZE;
+       max = min_t(u32, CDC_NCM_NTB_MAX_SIZE_RX, le32_to_cpu(ctx->ncm_parm.dwNtbInMaxSize));
+
+       /* dwNtbInMaxSize spec violation? Use MIN size for both limits */
+       if (max < min) {
+               dev_warn(&dev->intf->dev, "dwNtbInMaxSize=%u is too small. Using %u\n",
+                        le32_to_cpu(ctx->ncm_parm.dwNtbInMaxSize), min);
+               max = min;
+       }
+
+       val = clamp_t(u32, new_rx, min, max);
+       if (val != new_rx) {
+               dev_dbg(&dev->intf->dev, "rx_max must be in the [%u, %u] range. Using %u\n",
+                       min, max, val);
+       }
+
+       /* usbnet use these values for sizing rx queues */
+       dev->rx_urb_size = val;
+
+       /* inform device about NTB input size changes */
+       if (val != ctx->rx_max) {
+               __le32 dwNtbInMaxSize = cpu_to_le32(val);
+
+               dev_info(&dev->intf->dev, "setting rx_max = %u\n", val);
+
+               /* need to unlink rx urbs before increasing buffer size */
+               if (netif_running(dev->net) && dev->rx_urb_size > ctx->rx_max)
+                       usbnet_unlink_rx_urbs(dev);
+
+               /* tell device to use new size */
+               if (usbnet_write_cmd(dev, USB_CDC_SET_NTB_INPUT_SIZE,
+                                    USB_TYPE_CLASS | USB_DIR_OUT
+                                    | USB_RECIP_INTERFACE,
+                                    0, iface_no, &dwNtbInMaxSize, 4) < 0)
+                       dev_dbg(&dev->intf->dev, "Setting NTB Input Size failed\n");
+               else
+                       ctx->rx_max = val;
+       }
+
+       /* clamp new_tx to sane values */
+       min = ctx->max_datagram_size + ctx->max_ndp_size + sizeof(struct usb_cdc_ncm_nth16);
+       max = min_t(u32, CDC_NCM_NTB_MAX_SIZE_TX, le32_to_cpu(ctx->ncm_parm.dwNtbOutMaxSize));
+
+       /* some devices set dwNtbOutMaxSize too low for the above default */
+       min = min(min, max);
+
+       val = clamp_t(u32, new_tx, min, max);
+       if (val != new_tx) {
+               dev_dbg(&dev->intf->dev, "tx_max must be in the [%u, %u] range. Using %u\n",
+                       min, max, val);
+       }
+       if (val != ctx->tx_max)
+               dev_info(&dev->intf->dev, "setting tx_max = %u\n", val);
+
+       /* Adding a pad byte here if necessary simplifies the handling
+        * in cdc_ncm_fill_tx_frame, making tx_max always represent
+        * the real skb max size.
+        *
+        * We cannot use dev->maxpacket here because this is called from
+        * .bind which is called before usbnet sets up dev->maxpacket
+        */
+       if (val != le32_to_cpu(ctx->ncm_parm.dwNtbOutMaxSize) &&
+           val % usb_maxpacket(dev->udev, dev->out, 1) == 0)
+               val++;
+
+       /* we might need to flush any pending tx buffers if running */
+       if (netif_running(dev->net) && val > ctx->tx_max) {
+               netif_tx_lock_bh(dev->net);
+               usbnet_start_xmit(NULL, dev->net);
+               ctx->tx_max = val;
+               netif_tx_unlock_bh(dev->net);
+       } else {
+               ctx->tx_max = val;
+       }
+
+       dev->hard_mtu = ctx->tx_max;
+
+       /* max qlen depend on hard_mtu and rx_urb_size */
+       usbnet_update_max_qlen(dev);
+
+       /* never pad more than 3 full USB packets per transfer */
+       ctx->min_tx_pkt = clamp_t(u16, ctx->tx_max - 3 * usb_maxpacket(dev->udev, dev->out, 1),
+                                 CDC_NCM_MIN_TX_PKT, ctx->tx_max);
+}
+
+/* helpers for NCM and MBIM differences */
+static u8 cdc_ncm_flags(struct usbnet *dev)
+{
+       struct cdc_ncm_ctx *ctx = (struct cdc_ncm_ctx *)dev->data[0];
+
+       if (cdc_ncm_comm_intf_is_mbim(dev->intf->cur_altsetting) && ctx->mbim_desc)
+               return ctx->mbim_desc->bmNetworkCapabilities;
+       if (ctx->func_desc)
+               return ctx->func_desc->bmNetworkCapabilities;
+       return 0;
+}
+
+static int cdc_ncm_eth_hlen(struct usbnet *dev)
+{
+       if (cdc_ncm_comm_intf_is_mbim(dev->intf->cur_altsetting))
+               return 0;
+       return ETH_HLEN;
+}
+
+static u32 cdc_ncm_min_dgram_size(struct usbnet *dev)
+{
+       if (cdc_ncm_comm_intf_is_mbim(dev->intf->cur_altsetting))
+               return CDC_MBIM_MIN_DATAGRAM_SIZE;
+       return CDC_NCM_MIN_DATAGRAM_SIZE;
+}
+
+static u32 cdc_ncm_max_dgram_size(struct usbnet *dev)
+{
+       struct cdc_ncm_ctx *ctx = (struct cdc_ncm_ctx *)dev->data[0];
+
+       if (cdc_ncm_comm_intf_is_mbim(dev->intf->cur_altsetting) && ctx->mbim_desc)
+               return le16_to_cpu(ctx->mbim_desc->wMaxSegmentSize);
+       if (ctx->ether_desc)
+               return le16_to_cpu(ctx->ether_desc->wMaxSegmentSize);
+       return CDC_NCM_MAX_DATAGRAM_SIZE;
+}
+
+/* initial one-time device setup.  MUST be called with the data interface
+ * in altsetting 0
+ */
+static int cdc_ncm_init(struct usbnet *dev)
+{
+       struct cdc_ncm_ctx *ctx = (struct cdc_ncm_ctx *)dev->data[0];
+       u8 iface_no = ctx->control->cur_altsetting->desc.bInterfaceNumber;
+       int err;
 
        err = usbnet_read_cmd(dev, USB_CDC_GET_NTB_PARAMETERS,
                              USB_TYPE_CLASS | USB_DIR_IN
@@ -89,7 +340,36 @@ static int cdc_ncm_setup(struct usbnet *dev)
                return err; /* GET_NTB_PARAMETERS is required */
        }
 
-       /* read correct set of parameters according to device mode */
+       /* set CRC Mode */
+       if (cdc_ncm_flags(dev) & USB_CDC_NCM_NCAP_CRC_MODE) {
+               dev_dbg(&dev->intf->dev, "Setting CRC mode off\n");
+               err = usbnet_write_cmd(dev, USB_CDC_SET_CRC_MODE,
+                                      USB_TYPE_CLASS | USB_DIR_OUT
+                                      | USB_RECIP_INTERFACE,
+                                      USB_CDC_NCM_CRC_NOT_APPENDED,
+                                      iface_no, NULL, 0);
+               if (err < 0)
+                       dev_err(&dev->intf->dev, "SET_CRC_MODE failed\n");
+       }
+
+       /* set NTB format, if both formats are supported.
+        *
+        * "The host shall only send this command while the NCM Data
+        *  Interface is in alternate setting 0."
+        */
+       if (le16_to_cpu(ctx->ncm_parm.bmNtbFormatsSupported) &
+                                               USB_CDC_NCM_NTB32_SUPPORTED) {
+               dev_dbg(&dev->intf->dev, "Setting NTB format to 16-bit\n");
+               err = usbnet_write_cmd(dev, USB_CDC_SET_NTB_FORMAT,
+                                      USB_TYPE_CLASS | USB_DIR_OUT
+                                      | USB_RECIP_INTERFACE,
+                                      USB_CDC_NCM_NTB16_FORMAT,
+                                      iface_no, NULL, 0);
+               if (err < 0)
+                       dev_err(&dev->intf->dev, "SET_NTB_FORMAT failed\n");
+       }
+
+       /* set initial device values */
        ctx->rx_max = le32_to_cpu(ctx->ncm_parm.dwNtbInMaxSize);
        ctx->tx_max = le32_to_cpu(ctx->ncm_parm.dwNtbOutMaxSize);
        ctx->tx_remainder = le16_to_cpu(ctx->ncm_parm.wNdpOutPayloadRemainder);
@@ -97,72 +377,79 @@ static int cdc_ncm_setup(struct usbnet *dev)
        ctx->tx_ndp_modulus = le16_to_cpu(ctx->ncm_parm.wNdpOutAlignment);
        /* devices prior to NCM Errata shall set this field to zero */
        ctx->tx_max_datagrams = le16_to_cpu(ctx->ncm_parm.wNtbOutMaxDatagrams);
-       ntb_fmt_supported = le16_to_cpu(ctx->ncm_parm.bmNtbFormatsSupported);
-
-       /* there are some minor differences in NCM and MBIM defaults */
-       if (cdc_ncm_comm_intf_is_mbim(ctx->control->cur_altsetting)) {
-               if (!ctx->mbim_desc)
-                       return -EINVAL;
-               eth_hlen = 0;
-               flags = ctx->mbim_desc->bmNetworkCapabilities;
-               ctx->max_datagram_size = le16_to_cpu(ctx->mbim_desc->wMaxSegmentSize);
-               if (ctx->max_datagram_size < CDC_MBIM_MIN_DATAGRAM_SIZE)
-                       ctx->max_datagram_size = CDC_MBIM_MIN_DATAGRAM_SIZE;
-       } else {
-               if (!ctx->func_desc)
-                       return -EINVAL;
-               eth_hlen = ETH_HLEN;
-               flags = ctx->func_desc->bmNetworkCapabilities;
-               ctx->max_datagram_size = le16_to_cpu(ctx->ether_desc->wMaxSegmentSize);
-               if (ctx->max_datagram_size < CDC_NCM_MIN_DATAGRAM_SIZE)
-                       ctx->max_datagram_size = CDC_NCM_MIN_DATAGRAM_SIZE;
-       }
-
-       /* common absolute max for NCM and MBIM */
-       if (ctx->max_datagram_size > CDC_NCM_MAX_DATAGRAM_SIZE)
-               ctx->max_datagram_size = CDC_NCM_MAX_DATAGRAM_SIZE;
 
        dev_dbg(&dev->intf->dev,
                "dwNtbInMaxSize=%u dwNtbOutMaxSize=%u wNdpOutPayloadRemainder=%u wNdpOutDivisor=%u wNdpOutAlignment=%u wNtbOutMaxDatagrams=%u flags=0x%x\n",
                ctx->rx_max, ctx->tx_max, ctx->tx_remainder, ctx->tx_modulus,
-               ctx->tx_ndp_modulus, ctx->tx_max_datagrams, flags);
+               ctx->tx_ndp_modulus, ctx->tx_max_datagrams, cdc_ncm_flags(dev));
 
        /* max count of tx datagrams */
        if ((ctx->tx_max_datagrams == 0) ||
                        (ctx->tx_max_datagrams > CDC_NCM_DPT_DATAGRAMS_MAX))
                ctx->tx_max_datagrams = CDC_NCM_DPT_DATAGRAMS_MAX;
 
-       /* verify maximum size of received NTB in bytes */
-       if (ctx->rx_max < USB_CDC_NCM_NTB_MIN_IN_SIZE) {
-               dev_dbg(&dev->intf->dev, "Using min receive length=%d\n",
-                       USB_CDC_NCM_NTB_MIN_IN_SIZE);
-               ctx->rx_max = USB_CDC_NCM_NTB_MIN_IN_SIZE;
-       }
+       /* set up maximum NDP size */
+       ctx->max_ndp_size = sizeof(struct usb_cdc_ncm_ndp16) + (ctx->tx_max_datagrams + 1) * sizeof(struct usb_cdc_ncm_dpe16);
 
-       if (ctx->rx_max > CDC_NCM_NTB_MAX_SIZE_RX) {
-               dev_dbg(&dev->intf->dev, "Using default maximum receive length=%d\n",
-                       CDC_NCM_NTB_MAX_SIZE_RX);
-               ctx->rx_max = CDC_NCM_NTB_MAX_SIZE_RX;
-       }
+       /* initial coalescing timer interval */
+       ctx->timer_interval = CDC_NCM_TIMER_INTERVAL_USEC * NSEC_PER_USEC;
 
-       /* inform device about NTB input size changes */
-       if (ctx->rx_max != le32_to_cpu(ctx->ncm_parm.dwNtbInMaxSize)) {
-               __le32 dwNtbInMaxSize = cpu_to_le32(ctx->rx_max);
+       return 0;
+}
 
-               err = usbnet_write_cmd(dev, USB_CDC_SET_NTB_INPUT_SIZE,
-                                      USB_TYPE_CLASS | USB_DIR_OUT
-                                      | USB_RECIP_INTERFACE,
-                                      0, iface_no, &dwNtbInMaxSize, 4);
-               if (err < 0)
-                       dev_dbg(&dev->intf->dev, "Setting NTB Input Size failed\n");
+/* set a new max datagram size */
+static void cdc_ncm_set_dgram_size(struct usbnet *dev, int new_size)
+{
+       struct cdc_ncm_ctx *ctx = (struct cdc_ncm_ctx *)dev->data[0];
+       u8 iface_no = ctx->control->cur_altsetting->desc.bInterfaceNumber;
+       __le16 max_datagram_size;
+       u16 mbim_mtu;
+       int err;
+
+       /* set default based on descriptors */
+       ctx->max_datagram_size = clamp_t(u32, new_size,
+                                        cdc_ncm_min_dgram_size(dev),
+                                        CDC_NCM_MAX_DATAGRAM_SIZE);
+
+       /* inform the device about the selected Max Datagram Size? */
+       if (!(cdc_ncm_flags(dev) & USB_CDC_NCM_NCAP_MAX_DATAGRAM_SIZE))
+               goto out;
+
+       /* read current mtu value from device */
+       err = usbnet_read_cmd(dev, USB_CDC_GET_MAX_DATAGRAM_SIZE,
+                             USB_TYPE_CLASS | USB_DIR_IN | USB_RECIP_INTERFACE,
+                             0, iface_no, &max_datagram_size, 2);
+       if (err < 0) {
+               dev_dbg(&dev->intf->dev, "GET_MAX_DATAGRAM_SIZE failed\n");
+               goto out;
        }
 
-       /* verify maximum size of transmitted NTB in bytes */
-       if (ctx->tx_max > CDC_NCM_NTB_MAX_SIZE_TX) {
-               dev_dbg(&dev->intf->dev, "Using default maximum transmit length=%d\n",
-                       CDC_NCM_NTB_MAX_SIZE_TX);
-               ctx->tx_max = CDC_NCM_NTB_MAX_SIZE_TX;
+       if (le16_to_cpu(max_datagram_size) == ctx->max_datagram_size)
+               goto out;
+
+       max_datagram_size = cpu_to_le16(ctx->max_datagram_size);
+       err = usbnet_write_cmd(dev, USB_CDC_SET_MAX_DATAGRAM_SIZE,
+                              USB_TYPE_CLASS | USB_DIR_OUT | USB_RECIP_INTERFACE,
+                              0, iface_no, &max_datagram_size, 2);
+       if (err < 0)
+               dev_dbg(&dev->intf->dev, "SET_MAX_DATAGRAM_SIZE failed\n");
+
+out:
+       /* set MTU to max supported by the device if necessary */
+       dev->net->mtu = min_t(int, dev->net->mtu, ctx->max_datagram_size - cdc_ncm_eth_hlen(dev));
+
+       /* do not exceed operator preferred MTU */
+       if (ctx->mbim_extended_desc) {
+               mbim_mtu = le16_to_cpu(ctx->mbim_extended_desc->wMTU);
+               if (mbim_mtu != 0 && mbim_mtu < dev->net->mtu)
+                       dev->net->mtu = mbim_mtu;
        }
+}
+
+static void cdc_ncm_fix_modulus(struct usbnet *dev)
+{
+       struct cdc_ncm_ctx *ctx = (struct cdc_ncm_ctx *)dev->data[0];
+       u32 val;
 
        /*
         * verify that the structure alignment is:
@@ -199,68 +486,31 @@ static int cdc_ncm_setup(struct usbnet *dev)
        }
 
        /* adjust TX-remainder according to NCM specification. */
-       ctx->tx_remainder = ((ctx->tx_remainder - eth_hlen) &
+       ctx->tx_remainder = ((ctx->tx_remainder - cdc_ncm_eth_hlen(dev)) &
                             (ctx->tx_modulus - 1));
+}
 
-       /* additional configuration */
-
-       /* set CRC Mode */
-       if (flags & USB_CDC_NCM_NCAP_CRC_MODE) {
-               err = usbnet_write_cmd(dev, USB_CDC_SET_CRC_MODE,
-                                      USB_TYPE_CLASS | USB_DIR_OUT
-                                      | USB_RECIP_INTERFACE,
-                                      USB_CDC_NCM_CRC_NOT_APPENDED,
-                                      iface_no, NULL, 0);
-               if (err < 0)
-                       dev_dbg(&dev->intf->dev, "Setting CRC mode off failed\n");
-       }
-
-       /* set NTB format, if both formats are supported */
-       if (ntb_fmt_supported & USB_CDC_NCM_NTH32_SIGN) {
-               err = usbnet_write_cmd(dev, USB_CDC_SET_NTB_FORMAT,
-                                      USB_TYPE_CLASS | USB_DIR_OUT
-                                      | USB_RECIP_INTERFACE,
-                                      USB_CDC_NCM_NTB16_FORMAT,
-                                      iface_no, NULL, 0);
-               if (err < 0)
-                       dev_dbg(&dev->intf->dev, "Setting NTB format to 16-bit failed\n");
-       }
-
-       /* inform the device about the selected Max Datagram Size */
-       if (!(flags & USB_CDC_NCM_NCAP_MAX_DATAGRAM_SIZE))
-               goto out;
-
-       /* read current mtu value from device */
-       err = usbnet_read_cmd(dev, USB_CDC_GET_MAX_DATAGRAM_SIZE,
-                             USB_TYPE_CLASS | USB_DIR_IN | USB_RECIP_INTERFACE,
-                             0, iface_no, &max_datagram_size, 2);
-       if (err < 0) {
-               dev_dbg(&dev->intf->dev, "GET_MAX_DATAGRAM_SIZE failed\n");
-               goto out;
-       }
-
-       if (le16_to_cpu(max_datagram_size) == ctx->max_datagram_size)
-               goto out;
+static int cdc_ncm_setup(struct usbnet *dev)
+{
+       struct cdc_ncm_ctx *ctx = (struct cdc_ncm_ctx *)dev->data[0];
+       u32 def_rx, def_tx;
 
-       max_datagram_size = cpu_to_le16(ctx->max_datagram_size);
-       err = usbnet_write_cmd(dev, USB_CDC_SET_MAX_DATAGRAM_SIZE,
-                              USB_TYPE_CLASS | USB_DIR_OUT | USB_RECIP_INTERFACE,
-                              0, iface_no, &max_datagram_size, 2);
-       if (err < 0)
-               dev_dbg(&dev->intf->dev, "SET_MAX_DATAGRAM_SIZE failed\n");
+       /* be conservative when selecting initial buffer size to
+        * increase the number of hosts this will work for
+        */
+       def_rx = min_t(u32, CDC_NCM_NTB_DEF_SIZE_RX,
+                      le32_to_cpu(ctx->ncm_parm.dwNtbInMaxSize));
+       def_tx = min_t(u32, CDC_NCM_NTB_DEF_SIZE_TX,
+                      le32_to_cpu(ctx->ncm_parm.dwNtbOutMaxSize));
 
-out:
-       /* set MTU to max supported by the device if necessary */
-       if (dev->net->mtu > ctx->max_datagram_size - eth_hlen)
-               dev->net->mtu = ctx->max_datagram_size - eth_hlen;
+       /* clamp rx_max and tx_max and inform device */
+       cdc_ncm_update_rxtx_max(dev, def_rx, def_tx);
 
-       /* do not exceed operater preferred MTU */
-       if (ctx->mbim_extended_desc) {
-               mbim_mtu = le16_to_cpu(ctx->mbim_extended_desc->wMTU);
-               if (mbim_mtu != 0 && mbim_mtu < dev->net->mtu)
-                       dev->net->mtu = mbim_mtu;
-       }
+       /* sanitize the modulus and remainder values */
+       cdc_ncm_fix_modulus(dev);
 
+       /* set max datagram size */
+       cdc_ncm_set_dgram_size(dev, cdc_ncm_max_dgram_size(dev));
        return 0;
 }
 
@@ -424,10 +674,21 @@ advance:
        }
 
        /* check if we got everything */
-       if (!ctx->data || (!ctx->mbim_desc && !ctx->ether_desc)) {
-               dev_dbg(&intf->dev, "CDC descriptors missing\n");
+       if (!ctx->data) {
+               dev_dbg(&intf->dev, "CDC Union missing and no IAD found\n");
                goto error;
        }
+       if (cdc_ncm_comm_intf_is_mbim(intf->cur_altsetting)) {
+               if (!ctx->mbim_desc) {
+                       dev_dbg(&intf->dev, "MBIM functional descriptor missing\n");
+                       goto error;
+               }
+       } else {
+               if (!ctx->ether_desc || !ctx->func_desc) {
+                       dev_dbg(&intf->dev, "NCM or ECM functional descriptors missing\n");
+                       goto error;
+               }
+       }
 
        /* claim data interface, if different from control */
        if (ctx->data != ctx->control) {
@@ -447,8 +708,8 @@ advance:
                goto error2;
        }
 
-       /* initialize data interface */
-       if (cdc_ncm_setup(dev))
+       /* initialize basic device settings */
+       if (cdc_ncm_init(dev))
                goto error2;
 
        /* configure data interface */
@@ -477,18 +738,11 @@ advance:
                dev_info(&intf->dev, "MAC-Address: %pM\n", dev->net->dev_addr);
        }
 
-       /* usbnet use these values for sizing tx/rx queues */
-       dev->hard_mtu = ctx->tx_max;
-       dev->rx_urb_size = ctx->rx_max;
+       /* finish setting up the device specific data */
+       cdc_ncm_setup(dev);
 
-       /* cdc_ncm_setup will override dwNtbOutMaxSize if it is
-        * outside the sane range. Adding a pad byte here if necessary
-        * simplifies the handling in cdc_ncm_fill_tx_frame, making
-        * tx_max always represent the real skb max size.
-        */
-       if (ctx->tx_max != le32_to_cpu(ctx->ncm_parm.dwNtbOutMaxSize) &&
-           ctx->tx_max % usb_maxpacket(dev->udev, dev->out, 1) == 0)
-               ctx->tx_max++;
+       /* override ethtool_ops */
+       dev->net->ethtool_ops = &cdc_ncm_ethtool_ops;
 
        return 0;
 
@@ -541,10 +795,10 @@ void cdc_ncm_unbind(struct usbnet *dev, struct usb_interface *intf)
 }
 EXPORT_SYMBOL_GPL(cdc_ncm_unbind);
 
-/* Select the MBIM altsetting iff it is preferred and available,
- * returning the number of the corresponding data interface altsetting
+/* Return the number of the MBIM control interface altsetting iff it
+ * is preferred and available.
  */
-u8 cdc_ncm_select_altsetting(struct usbnet *dev, struct usb_interface *intf)
+u8 cdc_ncm_select_altsetting(struct usb_interface *intf)
 {
        struct usb_host_interface *alt;
 
@@ -563,15 +817,15 @@ u8 cdc_ncm_select_altsetting(struct usbnet *dev, struct usb_interface *intf)
         *   the rules given in section 6 (USB Device Model) of this
         *   specification."
         */
-       if (prefer_mbim && intf->num_altsetting == 2) {
+       if (intf->num_altsetting < 2)
+               return intf->cur_altsetting->desc.bAlternateSetting;
+
+       if (prefer_mbim) {
                alt = usb_altnum_to_altsetting(intf, CDC_NCM_COMM_ALTSETTING_MBIM);
-               if (alt && cdc_ncm_comm_intf_is_mbim(alt) &&
-                   !usb_set_interface(dev->udev,
-                                      intf->cur_altsetting->desc.bInterfaceNumber,
-                                      CDC_NCM_COMM_ALTSETTING_MBIM))
-                       return CDC_NCM_DATA_ALTSETTING_MBIM;
+               if (alt && cdc_ncm_comm_intf_is_mbim(alt))
+                       return CDC_NCM_COMM_ALTSETTING_MBIM;
        }
-       return CDC_NCM_DATA_ALTSETTING_NCM;
+       return CDC_NCM_COMM_ALTSETTING_NCM;
 }
 EXPORT_SYMBOL_GPL(cdc_ncm_select_altsetting);
 
@@ -580,12 +834,11 @@ static int cdc_ncm_bind(struct usbnet *dev, struct usb_interface *intf)
        int ret;
 
        /* MBIM backwards compatible function? */
-       cdc_ncm_select_altsetting(dev, intf);
-       if (cdc_ncm_comm_intf_is_mbim(intf->cur_altsetting))
+       if (cdc_ncm_select_altsetting(intf) != CDC_NCM_COMM_ALTSETTING_NCM)
                return -ENODEV;
 
-       /* NCM data altsetting is always 1 */
-       ret = cdc_ncm_bind_common(dev, intf, 1);
+       /* The NCM data altsetting is fixed */
+       ret = cdc_ncm_bind_common(dev, intf, CDC_NCM_DATA_ALTSETTING_NCM);
 
        /*
         * We should get an event when network connection is "connected" or
@@ -628,7 +881,7 @@ static struct usb_cdc_ncm_ndp16 *cdc_ncm_ndp(struct cdc_ncm_ctx *ctx, struct sk_
        cdc_ncm_align_tail(skb, ctx->tx_ndp_modulus, 0, ctx->tx_max);
 
        /* verify that there is room for the NDP and the datagram (reserve) */
-       if ((ctx->tx_max - skb->len - reserve) < CDC_NCM_NDP_SIZE)
+       if ((ctx->tx_max - skb->len - reserve) < ctx->max_ndp_size)
                return NULL;
 
        /* link to it */
@@ -638,7 +891,7 @@ static struct usb_cdc_ncm_ndp16 *cdc_ncm_ndp(struct cdc_ncm_ctx *ctx, struct sk_
                nth16->wNdpIndex = cpu_to_le16(skb->len);
 
        /* push a new empty NDP */
-       ndp16 = (struct usb_cdc_ncm_ndp16 *)memset(skb_put(skb, CDC_NCM_NDP_SIZE), 0, CDC_NCM_NDP_SIZE);
+       ndp16 = (struct usb_cdc_ncm_ndp16 *)memset(skb_put(skb, ctx->max_ndp_size), 0, ctx->max_ndp_size);
        ndp16->dwSignature = sign;
        ndp16->wLength = cpu_to_le16(sizeof(struct usb_cdc_ncm_ndp16) + sizeof(struct usb_cdc_ncm_dpe16));
        return ndp16;
@@ -683,6 +936,9 @@ cdc_ncm_fill_tx_frame(struct usbnet *dev, struct sk_buff *skb, __le32 sign)
 
                /* count total number of frames in this NTB */
                ctx->tx_curr_frame_num = 0;
+
+               /* recent payload counter for this skb_out */
+               ctx->tx_curr_frame_payload = 0;
        }
 
        for (n = ctx->tx_curr_frame_num; n < ctx->tx_max_datagrams; n++) {
@@ -720,6 +976,7 @@ cdc_ncm_fill_tx_frame(struct usbnet *dev, struct sk_buff *skb, __le32 sign)
                                ctx->tx_rem_sign = sign;
                                skb = NULL;
                                ready2send = 1;
+                               ctx->tx_reason_ntb_full++;      /* count reason for transmitting */
                        }
                        break;
                }
@@ -733,12 +990,14 @@ cdc_ncm_fill_tx_frame(struct usbnet *dev, struct sk_buff *skb, __le32 sign)
                ndp16->dpe16[index].wDatagramIndex = cpu_to_le16(skb_out->len);
                ndp16->wLength = cpu_to_le16(ndplen + sizeof(struct usb_cdc_ncm_dpe16));
                memcpy(skb_put(skb_out, skb->len), skb->data, skb->len);
+               ctx->tx_curr_frame_payload += skb->len; /* count real tx payload data */
                dev_kfree_skb_any(skb);
                skb = NULL;
 
                /* send now if this NDP is full */
                if (index >= CDC_NCM_DPT_DATAGRAMS_MAX) {
                        ready2send = 1;
+                       ctx->tx_reason_ndp_full++;      /* count reason for transmitting */
                        break;
                }
        }
@@ -758,7 +1017,7 @@ cdc_ncm_fill_tx_frame(struct usbnet *dev, struct sk_buff *skb, __le32 sign)
                ctx->tx_curr_skb = skb_out;
                goto exit_no_skb;
 
-       } else if ((n < ctx->tx_max_datagrams) && (ready2send == 0)) {
+       } else if ((n < ctx->tx_max_datagrams) && (ready2send == 0) && (ctx->timer_interval > 0)) {
                /* wait for more frames */
                /* push variables */
                ctx->tx_curr_skb = skb_out;
@@ -768,11 +1027,13 @@ cdc_ncm_fill_tx_frame(struct usbnet *dev, struct sk_buff *skb, __le32 sign)
                goto exit_no_skb;
 
        } else {
+               if (n == ctx->tx_max_datagrams)
+                       ctx->tx_reason_max_datagram++;  /* count reason for transmitting */
                /* frame goes out */
                /* variables will be reset at next call */
        }
 
-       /* If collected data size is less or equal CDC_NCM_MIN_TX_PKT
+       /* If collected data size is less or equal ctx->min_tx_pkt
         * bytes, we send buffers as it is. If we get more data, it
         * would be more efficient for USB HS mobile device with DMA
         * engine to receive a full size NTB, than canceling DMA
@@ -782,10 +1043,10 @@ cdc_ncm_fill_tx_frame(struct usbnet *dev, struct sk_buff *skb, __le32 sign)
         * a ZLP after full sized NTBs.
         */
        if (!(dev->driver_info->flags & FLAG_SEND_ZLP) &&
-           skb_out->len > CDC_NCM_MIN_TX_PKT)
+           skb_out->len > ctx->min_tx_pkt)
                memset(skb_put(skb_out, ctx->tx_max - skb_out->len), 0,
                       ctx->tx_max - skb_out->len);
-       else if ((skb_out->len % dev->maxpacket) == 0)
+       else if (skb_out->len < ctx->tx_max && (skb_out->len % dev->maxpacket) == 0)
                *skb_put(skb_out, 1) = 0;       /* force short packet */
 
        /* set final frame length */
@@ -795,11 +1056,22 @@ cdc_ncm_fill_tx_frame(struct usbnet *dev, struct sk_buff *skb, __le32 sign)
        /* return skb */
        ctx->tx_curr_skb = NULL;
        dev->net->stats.tx_packets += ctx->tx_curr_frame_num;
+
+       /* keep private stats: framing overhead and number of NTBs */
+       ctx->tx_overhead += skb_out->len - ctx->tx_curr_frame_payload;
+       ctx->tx_ntbs++;
+
+       /* usbnet has already counted all the framing overhead.
+        * Adjust the stats so that the tx_bytes counter show real
+        * payload data instead.
+        */
+       dev->net->stats.tx_bytes -= skb_out->len - ctx->tx_curr_frame_payload;
+
        return skb_out;
 
 exit_no_skb:
-       /* Start timer, if there is a remaining skb */
-       if (ctx->tx_curr_skb != NULL)
+       /* Start timer, if there is a remaining non-empty skb */
+       if (ctx->tx_curr_skb != NULL && n > 0)
                cdc_ncm_tx_timeout_start(ctx);
        return NULL;
 }
@@ -810,7 +1082,7 @@ static void cdc_ncm_tx_timeout_start(struct cdc_ncm_ctx *ctx)
        /* start timer, if not already started */
        if (!(hrtimer_active(&ctx->tx_timer) || atomic_read(&ctx->stop)))
                hrtimer_start(&ctx->tx_timer,
-                               ktime_set(0, CDC_NCM_TIMER_INTERVAL),
+                               ktime_set(0, ctx->timer_interval),
                                HRTIMER_MODE_REL);
 }
 
@@ -835,6 +1107,7 @@ static void cdc_ncm_txpath_bh(unsigned long param)
                cdc_ncm_tx_timeout_start(ctx);
                spin_unlock_bh(&ctx->mtx);
        } else if (dev->net != NULL) {
+               ctx->tx_reason_timeout++;       /* count reason for transmitting */
                spin_unlock_bh(&ctx->mtx);
                netif_tx_lock_bh(dev->net);
                usbnet_start_xmit(NULL, dev->net);
@@ -970,6 +1243,7 @@ int cdc_ncm_rx_fixup(struct usbnet *dev, struct sk_buff *skb_in)
        struct usb_cdc_ncm_dpe16 *dpe16;
        int ndpoffset;
        int loopcount = 50; /* arbitrary max preventing infinite loop */
+       u32 payload = 0;
 
        ndpoffset = cdc_ncm_rx_verify_nth16(ctx, skb_in);
        if (ndpoffset < 0)
@@ -1022,6 +1296,7 @@ next_ndp:
                        skb->data = ((u8 *)skb_in->data) + offset;
                        skb_set_tail_pointer(skb, len);
                        usbnet_skb_return(dev, skb);
+                       payload += len; /* count payload bytes in this NTB */
                }
        }
 err_ndp:
@@ -1030,6 +1305,10 @@ err_ndp:
        if (ndpoffset && loopcount--)
                goto next_ndp;
 
+       /* update stats */
+       ctx->rx_overhead += skb_in->len - payload;
+       ctx->rx_ntbs++;
+
        return 1;
 error:
        return 0;
@@ -1049,14 +1328,14 @@ cdc_ncm_speed_change(struct usbnet *dev,
         */
        if ((tx_speed > 1000000) && (rx_speed > 1000000)) {
                netif_info(dev, link, dev->net,
-                      "%u mbit/s downlink %u mbit/s uplink\n",
-                      (unsigned int)(rx_speed / 1000000U),
-                      (unsigned int)(tx_speed / 1000000U));
+                          "%u mbit/s downlink %u mbit/s uplink\n",
+                          (unsigned int)(rx_speed / 1000000U),
+                          (unsigned int)(tx_speed / 1000000U));
        } else {
                netif_info(dev, link, dev->net,
-                      "%u kbit/s downlink %u kbit/s uplink\n",
-                      (unsigned int)(rx_speed / 1000U),
-                      (unsigned int)(tx_speed / 1000U));
+                          "%u kbit/s downlink %u kbit/s uplink\n",
+                          (unsigned int)(rx_speed / 1000U),
+                          (unsigned int)(tx_speed / 1000U));
        }
 }
 
@@ -1086,11 +1365,10 @@ static void cdc_ncm_status(struct usbnet *dev, struct urb *urb)
                 * USB_CDC_NOTIFY_NETWORK_CONNECTION notification shall be
                 * sent by device after USB_CDC_NOTIFY_SPEED_CHANGE.
                 */
-               ctx->connected = le16_to_cpu(event->wValue);
                netif_info(dev, link, dev->net,
                           "network connection: %sconnected\n",
-                          ctx->connected ? "" : "dis");
-               usbnet_link_change(dev, ctx->connected, 0);
+                          !!event->wValue ? "" : "dis");
+               usbnet_link_change(dev, !!event->wValue, 0);
                break;
 
        case USB_CDC_NOTIFY_SPEED_CHANGE:
@@ -1110,23 +1388,11 @@ static void cdc_ncm_status(struct usbnet *dev, struct urb *urb)
        }
 }
 
-static int cdc_ncm_check_connect(struct usbnet *dev)
-{
-       struct cdc_ncm_ctx *ctx;
-
-       ctx = (struct cdc_ncm_ctx *)dev->data[0];
-       if (ctx == NULL)
-               return 1;       /* disconnected */
-
-       return !ctx->connected;
-}
-
 static const struct driver_info cdc_ncm_info = {
        .description = "CDC NCM",
        .flags = FLAG_POINTTOPOINT | FLAG_NO_SETINT | FLAG_MULTI_PACKET,
        .bind = cdc_ncm_bind,
        .unbind = cdc_ncm_unbind,
-       .check_connect = cdc_ncm_check_connect,
        .manage_power = usbnet_manage_power,
        .status = cdc_ncm_status,
        .rx_fixup = cdc_ncm_rx_fixup,
@@ -1140,7 +1406,6 @@ static const struct driver_info wwan_info = {
                        | FLAG_WWAN,
        .bind = cdc_ncm_bind,
        .unbind = cdc_ncm_unbind,
-       .check_connect = cdc_ncm_check_connect,
        .manage_power = usbnet_manage_power,
        .status = cdc_ncm_status,
        .rx_fixup = cdc_ncm_rx_fixup,
@@ -1154,7 +1419,6 @@ static const struct driver_info wwan_noarp_info = {
                        | FLAG_WWAN | FLAG_NOARP,
        .bind = cdc_ncm_bind,
        .unbind = cdc_ncm_unbind,
-       .check_connect = cdc_ncm_check_connect,
        .manage_power = usbnet_manage_power,
        .status = cdc_ncm_status,
        .rx_fixup = cdc_ncm_rx_fixup,
index 660bd5ea9fc0b311918812af8d3959c830b96cf4..a3a05869309df6a1ac34cdb00c6ff4d031dc921b 100644 (file)
@@ -2425,7 +2425,7 @@ static void hso_net_init(struct net_device *net)
        net->type = ARPHRD_NONE;
        net->mtu = DEFAULT_MTU - 14;
        net->tx_queue_len = 10;
-       SET_ETHTOOL_OPS(net, &ops);
+       net->ethtool_ops = &ops;
 
        /* and initialize the semaphore */
        spin_lock_init(&hso_net->net_lock);
index 312178d7b698e06a6bc97ede9c7c1eb49510c322..f9822bc75425a9bacc2dcd8915344c6373778a2c 100644 (file)
@@ -172,24 +172,11 @@ err:
        return ret;
 }
 
-static int huawei_cdc_ncm_check_connect(struct usbnet *usbnet_dev)
-{
-       struct cdc_ncm_ctx *ctx;
-
-       ctx = (struct cdc_ncm_ctx *)usbnet_dev->data[0];
-
-       if (ctx == NULL)
-               return 1; /* disconnected */
-
-       return !ctx->connected;
-}
-
 static const struct driver_info huawei_cdc_ncm_info = {
        .description = "Huawei CDC NCM device",
        .flags = FLAG_NO_SETINT | FLAG_MULTI_PACKET | FLAG_WWAN,
        .bind = huawei_cdc_ncm_bind,
        .unbind = huawei_cdc_ncm_unbind,
-       .check_connect = huawei_cdc_ncm_check_connect,
        .manage_power = huawei_cdc_ncm_manage_power,
        .rx_fixup = cdc_ncm_rx_fixup,
        .tx_fixup = cdc_ncm_tx_fixup,
index 421934c83f1cdb3b1e7dbfbd5e0bd19d944a0da1..f72570708edb52db75031253e8e3a9d5fb811587 100644 (file)
@@ -524,7 +524,7 @@ static int ipheth_probe(struct usb_interface *intf,
        usb_set_intfdata(intf, dev);
 
        SET_NETDEV_DEV(netdev, &intf->dev);
-       SET_ETHTOOL_OPS(netdev, &ops);
+       netdev->ethtool_ops = &ops;
 
        retval = register_netdev(netdev);
        if (retval) {
index a359d3bb7c5b125422cf59dea69c8a905099fed7..dcb6d33141e0640f545555848434d8efd7822878 100644 (file)
@@ -1171,7 +1171,7 @@ err_fw:
        netdev->netdev_ops = &kaweth_netdev_ops;
        netdev->watchdog_timeo = KAWETH_TX_TIMEOUT;
        netdev->mtu = le16_to_cpu(kaweth->configuration.segment_size);
-       SET_ETHTOOL_OPS(netdev, &ops);
+       netdev->ethtool_ops = &ops;
 
        /* kaweth is zeroed as part of alloc_netdev */
        INIT_DELAYED_WORK(&kaweth->lowmem_work, kaweth_resubmit_tl);
index 03e8a15d7deb74d328d5e563ed2077f62c8d9687..f840802159158b56a5b840d7abb83152fcd0c83f 100644 (file)
@@ -1159,7 +1159,7 @@ static int pegasus_probe(struct usb_interface *intf,
 
        net->watchdog_timeo = PEGASUS_TX_TIMEOUT;
        net->netdev_ops = &pegasus_netdev_ops;
-       SET_ETHTOOL_OPS(net, &ops);
+       net->ethtool_ops = &ops;
        pegasus->mii.dev = net;
        pegasus->mii.mdio_read = mdio_read;
        pegasus->mii.mdio_write = mdio_write;
index e3458e3c44f146653048aba99295670caabd4db5..83208d4fdc5983aa963dbf1640732c17706ef9a2 100644 (file)
@@ -669,6 +669,22 @@ static const struct usb_device_id products[] = {
        {QMI_FIXED_INTF(0x05c6, 0x920d, 5)},
        {QMI_FIXED_INTF(0x12d1, 0x140c, 1)},    /* Huawei E173 */
        {QMI_FIXED_INTF(0x12d1, 0x14ac, 1)},    /* Huawei E1820 */
+       {QMI_FIXED_INTF(0x16d8, 0x6003, 0)},    /* CMOTech 6003 */
+       {QMI_FIXED_INTF(0x16d8, 0x6007, 0)},    /* CMOTech CHE-628S */
+       {QMI_FIXED_INTF(0x16d8, 0x6008, 0)},    /* CMOTech CMU-301 */
+       {QMI_FIXED_INTF(0x16d8, 0x6280, 0)},    /* CMOTech CHU-628 */
+       {QMI_FIXED_INTF(0x16d8, 0x7001, 0)},    /* CMOTech CHU-720S */
+       {QMI_FIXED_INTF(0x16d8, 0x7002, 0)},    /* CMOTech 7002 */
+       {QMI_FIXED_INTF(0x16d8, 0x7003, 4)},    /* CMOTech CHU-629K */
+       {QMI_FIXED_INTF(0x16d8, 0x7004, 3)},    /* CMOTech 7004 */
+       {QMI_FIXED_INTF(0x16d8, 0x7006, 5)},    /* CMOTech CGU-629 */
+       {QMI_FIXED_INTF(0x16d8, 0x700a, 4)},    /* CMOTech CHU-629S */
+       {QMI_FIXED_INTF(0x16d8, 0x7211, 0)},    /* CMOTech CHU-720I */
+       {QMI_FIXED_INTF(0x16d8, 0x7212, 0)},    /* CMOTech 7212 */
+       {QMI_FIXED_INTF(0x16d8, 0x7213, 0)},    /* CMOTech 7213 */
+       {QMI_FIXED_INTF(0x16d8, 0x7251, 1)},    /* CMOTech 7251 */
+       {QMI_FIXED_INTF(0x16d8, 0x7252, 1)},    /* CMOTech 7252 */
+       {QMI_FIXED_INTF(0x16d8, 0x7253, 1)},    /* CMOTech 7253 */
        {QMI_FIXED_INTF(0x19d2, 0x0002, 1)},
        {QMI_FIXED_INTF(0x19d2, 0x0012, 1)},
        {QMI_FIXED_INTF(0x19d2, 0x0017, 3)},
@@ -730,16 +746,28 @@ static const struct usb_device_id products[] = {
        {QMI_FIXED_INTF(0x114f, 0x68a2, 8)},    /* Sierra Wireless MC7750 */
        {QMI_FIXED_INTF(0x1199, 0x68a2, 8)},    /* Sierra Wireless MC7710 in QMI mode */
        {QMI_FIXED_INTF(0x1199, 0x68a2, 19)},   /* Sierra Wireless MC7710 in QMI mode */
+       {QMI_FIXED_INTF(0x1199, 0x68c0, 8)},    /* Sierra Wireless MC73xx */
+       {QMI_FIXED_INTF(0x1199, 0x68c0, 10)},   /* Sierra Wireless MC73xx */
+       {QMI_FIXED_INTF(0x1199, 0x68c0, 11)},   /* Sierra Wireless MC73xx */
        {QMI_FIXED_INTF(0x1199, 0x901c, 8)},    /* Sierra Wireless EM7700 */
+       {QMI_FIXED_INTF(0x1199, 0x901f, 8)},    /* Sierra Wireless EM7355 */
+       {QMI_FIXED_INTF(0x1199, 0x9041, 8)},    /* Sierra Wireless MC7305/MC7355 */
        {QMI_FIXED_INTF(0x1199, 0x9051, 8)},    /* Netgear AirCard 340U */
        {QMI_FIXED_INTF(0x1bbb, 0x011e, 4)},    /* Telekom Speedstick LTE II (Alcatel One Touch L100V LTE) */
+       {QMI_FIXED_INTF(0x1bbb, 0x0203, 2)},    /* Alcatel L800MA */
        {QMI_FIXED_INTF(0x2357, 0x0201, 4)},    /* TP-LINK HSUPA Modem MA180 */
        {QMI_FIXED_INTF(0x2357, 0x9000, 4)},    /* TP-LINK MA260 */
        {QMI_FIXED_INTF(0x1bc7, 0x1200, 5)},    /* Telit LE920 */
        {QMI_FIXED_INTF(0x1bc7, 0x1201, 2)},    /* Telit LE920 */
        {QMI_FIXED_INTF(0x0b3c, 0xc005, 6)},    /* Olivetti Olicard 200 */
+       {QMI_FIXED_INTF(0x0b3c, 0xc00b, 4)},    /* Olivetti Olicard 500 */
        {QMI_FIXED_INTF(0x1e2d, 0x0060, 4)},    /* Cinterion PLxx */
        {QMI_FIXED_INTF(0x1e2d, 0x0053, 4)},    /* Cinterion PHxx,PXxx */
+       {QMI_FIXED_INTF(0x413c, 0x81a2, 8)},    /* Dell Wireless 5806 Gobi(TM) 4G LTE Mobile Broadband Card */
+       {QMI_FIXED_INTF(0x413c, 0x81a3, 8)},    /* Dell Wireless 5570 HSPA+ (42Mbps) Mobile Broadband Card */
+       {QMI_FIXED_INTF(0x413c, 0x81a4, 8)},    /* Dell Wireless 5570e HSPA+ (42Mbps) Mobile Broadband Card */
+       {QMI_FIXED_INTF(0x413c, 0x81a8, 8)},    /* Dell Wireless 5808 Gobi(TM) 4G LTE Mobile Broadband Card */
+       {QMI_FIXED_INTF(0x413c, 0x81a9, 8)},    /* Dell Wireless 5808e Gobi(TM) 4G LTE Mobile Broadband Card */
 
        /* 4. Gobi 1000 devices */
        {QMI_GOBI1K_DEVICE(0x05c6, 0x9212)},    /* Acer Gobi Modem Device */
index 3fbfb0869030aeeb6c540766bdbf924d959abcd6..9f91c7aba4b0c7961bca73593411d821c751566b 100644 (file)
@@ -3452,7 +3452,7 @@ static int rtl8152_probe(struct usb_interface *intf,
                              NETIF_F_TSO | NETIF_F_FRAGLIST |
                              NETIF_F_IPV6_CSUM | NETIF_F_TSO6;
 
-       SET_ETHTOOL_OPS(netdev, &ops);
+       netdev->ethtool_ops = &ops;
        netif_set_gso_max_size(netdev, RTL_LIMITED_TSO_SIZE);
 
        tp->mii.dev = netdev;
index da2c4583bd2d9d4894ea64ae566a028e9da96b2b..6e87e5710048cf952c1505950d954a493b6b8878 100644 (file)
@@ -878,7 +878,7 @@ static int rtl8150_probe(struct usb_interface *intf,
        dev->netdev = netdev;
        netdev->netdev_ops = &rtl8150_netdev_ops;
        netdev->watchdog_timeo = RTL8150_TX_TIMEOUT;
-       SET_ETHTOOL_OPS(netdev, &ops);
+       netdev->ethtool_ops = &ops;
        dev->intr_interval = 100;       /* 100ms */
 
        if (!alloc_all_urbs(dev)) {
index 7b687469199b58357a74490cb266e4d47534c097..7d9f84a91f37dd96fbd835d170dc8ed24c281b6c 100644 (file)
@@ -1285,7 +1285,7 @@ static int virtnet_set_channels(struct net_device *dev,
        if (channels->rx_count || channels->tx_count || channels->other_count)
                return -EINVAL;
 
-       if (queue_pairs > vi->max_queue_pairs)
+       if (queue_pairs > vi->max_queue_pairs || queue_pairs == 0)
                return -EINVAL;
 
        get_online_cpus();
@@ -1646,7 +1646,7 @@ static int virtnet_probe(struct virtio_device *vdev)
        dev->netdev_ops = &virtnet_netdev;
        dev->features = NETIF_F_HIGHDMA;
 
-       SET_ETHTOOL_OPS(dev, &virtnet_ethtool_ops);
+       dev->ethtool_ops = &virtnet_ethtool_ops;
        SET_NETDEV_DEV(dev, &vdev->dev);
 
        /* Do we support "hardware" checksums? */
@@ -1724,6 +1724,13 @@ static int virtnet_probe(struct virtio_device *vdev)
        if (virtio_has_feature(vdev, VIRTIO_NET_F_CTRL_VQ))
                vi->has_cvq = true;
 
+       if (vi->any_header_sg) {
+               if (vi->mergeable_rx_bufs)
+                       dev->needed_headroom = sizeof(struct virtio_net_hdr_mrg_rxbuf);
+               else
+                       dev->needed_headroom = sizeof(struct virtio_net_hdr);
+       }
+
        /* Use single tx/rx queue pair as default */
        vi->curr_queue_pairs = 1;
        vi->max_queue_pairs = max_queue_pairs;
index 600ab56c0008bab49251505e38ca911b611da2c1..00e120296e923ef3dc5433112941e8d643bf7bf6 100644 (file)
@@ -635,5 +635,5 @@ static const struct ethtool_ops vmxnet3_ethtool_ops = {
 
 void vmxnet3_set_ethtool_ops(struct net_device *netdev)
 {
-       SET_ETHTOOL_OPS(netdev, &vmxnet3_ethtool_ops);
+       netdev->ethtool_ops = &vmxnet3_ethtool_ops;
 }
index 82355d5d155a86921be733cc40deefcbaa6b7116..e68c8eb4ea8e297ad3c3b146a194a8ac84e4b1c6 100644 (file)
@@ -127,6 +127,7 @@ struct vxlan_dev {
        struct list_head  next;         /* vxlan's per namespace list */
        struct vxlan_sock *vn_sock;     /* listening socket */
        struct net_device *dev;
+       struct net        *net;         /* netns for packet i/o */
        struct vxlan_rdst default_dst;  /* default destination */
        union vxlan_addr  saddr;        /* source address */
        __be16            dst_port;
@@ -389,8 +390,8 @@ static inline size_t vxlan_nlmsg_size(void)
                + nla_total_size(sizeof(struct nda_cacheinfo));
 }
 
-static void vxlan_fdb_notify(struct vxlan_dev *vxlan,
-                            struct vxlan_fdb *fdb, int type)
+static void vxlan_fdb_notify(struct vxlan_dev *vxlan, struct vxlan_fdb *fdb,
+                            struct vxlan_rdst *rd, int type)
 {
        struct net *net = dev_net(vxlan->dev);
        struct sk_buff *skb;
@@ -400,8 +401,7 @@ static void vxlan_fdb_notify(struct vxlan_dev *vxlan,
        if (skb == NULL)
                goto errout;
 
-       err = vxlan_fdb_info(skb, vxlan, fdb, 0, 0, type, 0,
-                            first_remote_rtnl(fdb));
+       err = vxlan_fdb_info(skb, vxlan, fdb, 0, 0, type, 0, rd);
        if (err < 0) {
                /* -EMSGSIZE implies BUG in vxlan_nlmsg_size() */
                WARN_ON(err == -EMSGSIZE);
@@ -427,10 +427,7 @@ static void vxlan_ip_miss(struct net_device *dev, union vxlan_addr *ipa)
                .remote_vni = VXLAN_N_VID,
        };
 
-       INIT_LIST_HEAD(&f.remotes);
-       list_add_rcu(&remote.list, &f.remotes);
-
-       vxlan_fdb_notify(vxlan, &f, RTM_GETNEIGH);
+       vxlan_fdb_notify(vxlan, &f, &remote, RTM_GETNEIGH);
 }
 
 static void vxlan_fdb_miss(struct vxlan_dev *vxlan, const u8 eth_addr[ETH_ALEN])
@@ -438,11 +435,11 @@ static void vxlan_fdb_miss(struct vxlan_dev *vxlan, const u8 eth_addr[ETH_ALEN])
        struct vxlan_fdb f = {
                .state = NUD_STALE,
        };
+       struct vxlan_rdst remote = { };
 
-       INIT_LIST_HEAD(&f.remotes);
        memcpy(f.eth_addr, eth_addr, ETH_ALEN);
 
-       vxlan_fdb_notify(vxlan, &f, RTM_GETNEIGH);
+       vxlan_fdb_notify(vxlan, &f, &remote, RTM_GETNEIGH);
 }
 
 /* Hash Ethernet address */
@@ -533,7 +530,8 @@ static int vxlan_fdb_replace(struct vxlan_fdb *f,
 
 /* Add/update destinations for multicast */
 static int vxlan_fdb_append(struct vxlan_fdb *f,
-                           union vxlan_addr *ip, __be16 port, __u32 vni, __u32 ifindex)
+                           union vxlan_addr *ip, __be16 port, __u32 vni,
+                           __u32 ifindex, struct vxlan_rdst **rdp)
 {
        struct vxlan_rdst *rd;
 
@@ -551,6 +549,7 @@ static int vxlan_fdb_append(struct vxlan_fdb *f,
 
        list_add_tail_rcu(&rd->list, &f->remotes);
 
+       *rdp = rd;
        return 1;
 }
 
@@ -690,6 +689,7 @@ static int vxlan_fdb_create(struct vxlan_dev *vxlan,
                            __be16 port, __u32 vni, __u32 ifindex,
                            __u8 ndm_flags)
 {
+       struct vxlan_rdst *rd = NULL;
        struct vxlan_fdb *f;
        int notify = 0;
 
@@ -726,7 +726,8 @@ static int vxlan_fdb_create(struct vxlan_dev *vxlan,
                if ((flags & NLM_F_APPEND) &&
                    (is_multicast_ether_addr(f->eth_addr) ||
                     is_zero_ether_addr(f->eth_addr))) {
-                       int rc = vxlan_fdb_append(f, ip, port, vni, ifindex);
+                       int rc = vxlan_fdb_append(f, ip, port, vni, ifindex,
+                                                 &rd);
 
                        if (rc < 0)
                                return rc;
@@ -756,15 +757,18 @@ static int vxlan_fdb_create(struct vxlan_dev *vxlan,
                INIT_LIST_HEAD(&f->remotes);
                memcpy(f->eth_addr, mac, ETH_ALEN);
 
-               vxlan_fdb_append(f, ip, port, vni, ifindex);
+               vxlan_fdb_append(f, ip, port, vni, ifindex, &rd);
 
                ++vxlan->addrcnt;
                hlist_add_head_rcu(&f->hlist,
                                   vxlan_fdb_head(vxlan, mac));
        }
 
-       if (notify)
-               vxlan_fdb_notify(vxlan, f, RTM_NEWNEIGH);
+       if (notify) {
+               if (rd == NULL)
+                       rd = first_remote_rtnl(f);
+               vxlan_fdb_notify(vxlan, f, rd, RTM_NEWNEIGH);
+       }
 
        return 0;
 }
@@ -785,7 +789,7 @@ static void vxlan_fdb_destroy(struct vxlan_dev *vxlan, struct vxlan_fdb *f)
                    "delete %pM\n", f->eth_addr);
 
        --vxlan->addrcnt;
-       vxlan_fdb_notify(vxlan, f, RTM_DELNEIGH);
+       vxlan_fdb_notify(vxlan, f, first_remote_rtnl(f), RTM_DELNEIGH);
 
        hlist_del_rcu(&f->hlist);
        call_rcu(&f->rcu, vxlan_fdb_free);
@@ -919,6 +923,7 @@ static int vxlan_fdb_delete(struct ndmsg *ndm, struct nlattr *tb[],
         */
        if (rd && !list_is_singular(&f->remotes)) {
                list_del_rcu(&rd->list);
+               vxlan_fdb_notify(vxlan, f, rd, RTM_DELNEIGH);
                kfree_rcu(rd, rcu);
                goto out;
        }
@@ -993,7 +998,7 @@ static bool vxlan_snoop(struct net_device *dev,
 
                rdst->remote_ip = *src_ip;
                f->updated = jiffies;
-               vxlan_fdb_notify(vxlan, f, RTM_NEWNEIGH);
+               vxlan_fdb_notify(vxlan, f, rdst, RTM_NEWNEIGH);
        } else {
                /* learned new entry */
                spin_lock(&vxlan->hash_lock);
@@ -1199,6 +1204,7 @@ static void vxlan_rcv(struct vxlan_sock *vs,
 
        remote_ip = &vxlan->default_dst.remote_ip;
        skb_reset_mac_header(skb);
+       skb_scrub_packet(skb, !net_eq(vxlan->net, dev_net(vxlan->dev)));
        skb->protocol = eth_type_trans(skb, vxlan->dev);
 
        /* Ignore packet loops (and multicast echo) */
@@ -1614,7 +1620,8 @@ static int vxlan6_xmit_skb(struct vxlan_sock *vs,
                           struct dst_entry *dst, struct sk_buff *skb,
                           struct net_device *dev, struct in6_addr *saddr,
                           struct in6_addr *daddr, __u8 prio, __u8 ttl,
-                          __be16 src_port, __be16 dst_port, __be32 vni)
+                          __be16 src_port, __be16 dst_port, __be32 vni,
+                          bool xnet)
 {
        struct ipv6hdr *ip6h;
        struct vxlanhdr *vxh;
@@ -1627,7 +1634,7 @@ static int vxlan6_xmit_skb(struct vxlan_sock *vs,
                skb->encapsulation = 1;
        }
 
-       skb_scrub_packet(skb, false);
+       skb_scrub_packet(skb, xnet);
 
        min_headroom = LL_RESERVED_SPACE(dst->dev) + dst->header_len
                        + VXLAN_HLEN + sizeof(struct ipv6hdr)
@@ -1707,7 +1714,7 @@ static int vxlan6_xmit_skb(struct vxlan_sock *vs,
 int vxlan_xmit_skb(struct vxlan_sock *vs,
                   struct rtable *rt, struct sk_buff *skb,
                   __be32 src, __be32 dst, __u8 tos, __u8 ttl, __be16 df,
-                  __be16 src_port, __be16 dst_port, __be32 vni)
+                  __be16 src_port, __be16 dst_port, __be32 vni, bool xnet)
 {
        struct vxlanhdr *vxh;
        struct udphdr *uh;
@@ -1756,7 +1763,7 @@ int vxlan_xmit_skb(struct vxlan_sock *vs,
                return err;
 
        return iptunnel_xmit(vs->sock->sk, rt, skb, src, dst, IPPROTO_UDP,
-                            tos, ttl, df, false);
+                            tos, ttl, df, xnet);
 }
 EXPORT_SYMBOL_GPL(vxlan_xmit_skb);
 
@@ -1849,7 +1856,7 @@ static void vxlan_xmit_one(struct sk_buff *skb, struct net_device *dev,
                fl4.daddr = dst->sin.sin_addr.s_addr;
                fl4.saddr = vxlan->saddr.sin.sin_addr.s_addr;
 
-               rt = ip_route_output_key(dev_net(dev), &fl4);
+               rt = ip_route_output_key(vxlan->net, &fl4);
                if (IS_ERR(rt)) {
                        netdev_dbg(dev, "no route to %pI4\n",
                                   &dst->sin.sin_addr.s_addr);
@@ -1870,7 +1877,7 @@ static void vxlan_xmit_one(struct sk_buff *skb, struct net_device *dev,
                        struct vxlan_dev *dst_vxlan;
 
                        ip_rt_put(rt);
-                       dst_vxlan = vxlan_find_vni(dev_net(dev), vni, dst_port);
+                       dst_vxlan = vxlan_find_vni(vxlan->net, vni, dst_port);
                        if (!dst_vxlan)
                                goto tx_error;
                        vxlan_encap_bypass(skb, vxlan, dst_vxlan);
@@ -1883,7 +1890,8 @@ static void vxlan_xmit_one(struct sk_buff *skb, struct net_device *dev,
                err = vxlan_xmit_skb(vxlan->vn_sock, rt, skb,
                                     fl4.saddr, dst->sin.sin_addr.s_addr,
                                     tos, ttl, df, src_port, dst_port,
-                                    htonl(vni << 8));
+                                    htonl(vni << 8),
+                                    !net_eq(vxlan->net, dev_net(vxlan->dev)));
 
                if (err < 0)
                        goto rt_tx_error;
@@ -1923,7 +1931,7 @@ static void vxlan_xmit_one(struct sk_buff *skb, struct net_device *dev,
                        struct vxlan_dev *dst_vxlan;
 
                        dst_release(ndst);
-                       dst_vxlan = vxlan_find_vni(dev_net(dev), vni, dst_port);
+                       dst_vxlan = vxlan_find_vni(vxlan->net, vni, dst_port);
                        if (!dst_vxlan)
                                goto tx_error;
                        vxlan_encap_bypass(skb, vxlan, dst_vxlan);
@@ -1934,7 +1942,8 @@ static void vxlan_xmit_one(struct sk_buff *skb, struct net_device *dev,
 
                err = vxlan6_xmit_skb(vxlan->vn_sock, ndst, skb,
                                      dev, &fl6.saddr, &fl6.daddr, 0, ttl,
-                                     src_port, dst_port, htonl(vni << 8));
+                                     src_port, dst_port, htonl(vni << 8),
+                                     !net_eq(vxlan->net, dev_net(vxlan->dev)));
 #endif
        }
 
@@ -2078,7 +2087,7 @@ static void vxlan_vs_add_dev(struct vxlan_sock *vs, struct vxlan_dev *vxlan)
 static int vxlan_init(struct net_device *dev)
 {
        struct vxlan_dev *vxlan = netdev_priv(dev);
-       struct vxlan_net *vn = net_generic(dev_net(dev), vxlan_net_id);
+       struct vxlan_net *vn = net_generic(vxlan->net, vxlan_net_id);
        struct vxlan_sock *vs;
 
        dev->tstats = netdev_alloc_pcpu_stats(struct pcpu_sw_netstats);
@@ -2086,7 +2095,7 @@ static int vxlan_init(struct net_device *dev)
                return -ENOMEM;
 
        spin_lock(&vn->sock_lock);
-       vs = vxlan_find_sock(dev_net(dev), vxlan->dst_port);
+       vs = vxlan_find_sock(vxlan->net, vxlan->dst_port);
        if (vs) {
                /* If we have a socket with same port already, reuse it */
                atomic_inc(&vs->refcnt);
@@ -2168,8 +2177,8 @@ static void vxlan_flush(struct vxlan_dev *vxlan)
 /* Cleanup timer and forwarding table on shutdown */
 static int vxlan_stop(struct net_device *dev)
 {
-       struct vxlan_net *vn = net_generic(dev_net(dev), vxlan_net_id);
        struct vxlan_dev *vxlan = netdev_priv(dev);
+       struct vxlan_net *vn = net_generic(vxlan->net, vxlan_net_id);
        struct vxlan_sock *vs = vxlan->vn_sock;
 
        if (vs && vxlan_addr_multicast(&vxlan->default_dst.remote_ip) &&
@@ -2198,7 +2207,7 @@ static int vxlan_change_mtu(struct net_device *dev, int new_mtu)
        struct net_device *lowerdev;
        int max_mtu;
 
-       lowerdev = __dev_get_by_index(dev_net(dev), dst->remote_ifindex);
+       lowerdev = __dev_get_by_index(vxlan->net, dst->remote_ifindex);
        if (lowerdev == NULL)
                return eth_change_mtu(dev, new_mtu);
 
@@ -2281,7 +2290,6 @@ static void vxlan_setup(struct net_device *dev)
 
        dev->tx_queue_len = 0;
        dev->features   |= NETIF_F_LLTX;
-       dev->features   |= NETIF_F_NETNS_LOCAL;
        dev->features   |= NETIF_F_SG | NETIF_F_HW_CSUM;
        dev->features   |= NETIF_F_RXCSUM;
        dev->features   |= NETIF_F_GSO_SOFTWARE;
@@ -2574,7 +2582,7 @@ EXPORT_SYMBOL_GPL(vxlan_sock_add);
 static void vxlan_sock_work(struct work_struct *work)
 {
        struct vxlan_dev *vxlan = container_of(work, struct vxlan_dev, sock_work);
-       struct net *net = dev_net(vxlan->dev);
+       struct net *net = vxlan->net;
        struct vxlan_net *vn = net_generic(net, vxlan_net_id);
        __be16 port = vxlan->dst_port;
        struct vxlan_sock *nvs;
@@ -2601,6 +2609,8 @@ static int vxlan_newlink(struct net *net, struct net_device *dev,
        if (!data[IFLA_VXLAN_ID])
                return -EINVAL;
 
+       vxlan->net = dev_net(dev);
+
        vni = nla_get_u32(data[IFLA_VXLAN_ID]);
        dst->remote_vni = vni;
 
@@ -2706,7 +2716,7 @@ static int vxlan_newlink(struct net *net, struct net_device *dev,
                return -EEXIST;
        }
 
-       SET_ETHTOOL_OPS(dev, &vxlan_ethtool_ops);
+       dev->ethtool_ops = &vxlan_ethtool_ops;
 
        /* create an fdb entry for a valid default destination */
        if (!vxlan_addr_any(&vxlan->default_dst.remote_ip)) {
@@ -2735,8 +2745,8 @@ static int vxlan_newlink(struct net *net, struct net_device *dev,
 
 static void vxlan_dellink(struct net_device *dev, struct list_head *head)
 {
-       struct vxlan_net *vn = net_generic(dev_net(dev), vxlan_net_id);
        struct vxlan_dev *vxlan = netdev_priv(dev);
+       struct vxlan_net *vn = net_generic(vxlan->net, vxlan_net_id);
 
        spin_lock(&vn->sock_lock);
        if (!hlist_unhashed(&vxlan->hlist))
@@ -2901,8 +2911,33 @@ static __net_init int vxlan_init_net(struct net *net)
        return 0;
 }
 
+static void __net_exit vxlan_exit_net(struct net *net)
+{
+       struct vxlan_net *vn = net_generic(net, vxlan_net_id);
+       struct vxlan_dev *vxlan, *next;
+       struct net_device *dev, *aux;
+       LIST_HEAD(list);
+
+       rtnl_lock();
+       for_each_netdev_safe(net, dev, aux)
+               if (dev->rtnl_link_ops == &vxlan_link_ops)
+                       unregister_netdevice_queue(dev, &list);
+
+       list_for_each_entry_safe(vxlan, next, &vn->vxlan_list, next) {
+               /* If vxlan->dev is in the same netns, it has already been added
+                * to the list by the previous loop.
+                */
+               if (!net_eq(dev_net(vxlan->dev), net))
+                       unregister_netdevice_queue(dev, &list);
+       }
+
+       unregister_netdevice_many(&list);
+       rtnl_unlock();
+}
+
 static struct pernet_operations vxlan_net_ops = {
        .init = vxlan_init_net,
+       .exit = vxlan_exit_net,
        .id   = &vxlan_net_id,
        .size = sizeof(struct vxlan_net),
 };
index de3bbf43fc5ac14f41ee9d76eeacb1f059eeaab3..cdd45fb8a1f6892587abddf2abbf0963bd7a5653 100644 (file)
@@ -1322,10 +1322,6 @@ NOTE:  This is rather a useless action right now, as the
 
 static int sdla_change_mtu(struct net_device *dev, int new_mtu)
 {
-       struct frad_local *flp;
-
-       flp = netdev_priv(dev);
-
        if (netif_running(dev))
                return -EBUSY;
 
index 9c34d2fccfac61508705a4021f436e9a9024e936..9c78090e72f87dae118dff22fd366d5ce11bacd8 100644 (file)
@@ -500,26 +500,23 @@ int i2400m_pm_notifier(struct notifier_block *notifier,
  */
 int i2400m_pre_reset(struct i2400m *i2400m)
 {
-       int result;
        struct device *dev = i2400m_dev(i2400m);
 
        d_fnstart(3, dev, "(i2400m %p)\n", i2400m);
        d_printf(1, dev, "pre-reset shut down\n");
 
-       result = 0;
        mutex_lock(&i2400m->init_mutex);
        if (i2400m->updown) {
                netif_tx_disable(i2400m->wimax_dev.net_dev);
                __i2400m_dev_stop(i2400m);
-               result = 0;
                /* down't set updown to zero -- this way
                 * post_reset can restore properly */
        }
        mutex_unlock(&i2400m->init_mutex);
        if (i2400m->bus_release)
                i2400m->bus_release(i2400m);
-       d_fnend(3, dev, "(i2400m %p) = %d\n", i2400m, result);
-       return result;
+       d_fnend(3, dev, "(i2400m %p) = 0\n", i2400m);
+       return 0;
 }
 EXPORT_SYMBOL_GPL(i2400m_pre_reset);
 
index 507d9a9ee69ad4b61ece2d334434691801682efe..f92050617ae682e02bb48b6676a16298ae2dfa4f 100644 (file)
@@ -1090,7 +1090,8 @@ static int ar5523_set_rts_threshold(struct ieee80211_hw *hw, u32 value)
        return ret;
 }
 
-static void ar5523_flush(struct ieee80211_hw *hw, u32 queues, bool drop)
+static void ar5523_flush(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
+                        u32 queues, bool drop)
 {
        struct ar5523 *ar = hw->priv;
 
index a1f0996288508e3cad8ecd8e03f33153980e593e..17d221abd58c0bb70d72a6fbf14f1e7422f8ced5 100644 (file)
@@ -175,7 +175,7 @@ int ath10k_bmi_write_memory(struct ath10k *ar,
        return 0;
 }
 
-int ath10k_bmi_execute(struct ath10k *ar, u32 address, u32 *param)
+int ath10k_bmi_execute(struct ath10k *ar, u32 address, u32 param, u32 *result)
 {
        struct bmi_cmd cmd;
        union bmi_resp resp;
@@ -184,7 +184,7 @@ int ath10k_bmi_execute(struct ath10k *ar, u32 address, u32 *param)
        int ret;
 
        ath10k_dbg(ATH10K_DBG_BMI, "bmi execute address 0x%x param 0x%x\n",
-                  address, *param);
+                  address, param);
 
        if (ar->bmi.done_sent) {
                ath10k_warn("command disallowed\n");
@@ -193,7 +193,7 @@ int ath10k_bmi_execute(struct ath10k *ar, u32 address, u32 *param)
 
        cmd.id            = __cpu_to_le32(BMI_EXECUTE);
        cmd.execute.addr  = __cpu_to_le32(address);
-       cmd.execute.param = __cpu_to_le32(*param);
+       cmd.execute.param = __cpu_to_le32(param);
 
        ret = ath10k_hif_exchange_bmi_msg(ar, &cmd, cmdlen, &resp, &resplen);
        if (ret) {
@@ -204,10 +204,13 @@ int ath10k_bmi_execute(struct ath10k *ar, u32 address, u32 *param)
        if (resplen < sizeof(resp.execute)) {
                ath10k_warn("invalid execute response length (%d)\n",
                            resplen);
-               return ret;
+               return -EIO;
        }
 
-       *param = __le32_to_cpu(resp.execute.result);
+       *result = __le32_to_cpu(resp.execute.result);
+
+       ath10k_dbg(ATH10K_DBG_BMI, "bmi execute result 0x%x\n", *result);
+
        return 0;
 }
 
index 8d81ce1cec216c7b55fa1c0ab47b65cc41cafb0d..3a9bdf51c96a212bb98166e42b7f292179c999cf 100644 (file)
@@ -217,7 +217,7 @@ int ath10k_bmi_write_memory(struct ath10k *ar, u32 address,
                ret;                                                    \
        })
 
-int ath10k_bmi_execute(struct ath10k *ar, u32 address, u32 *param);
+int ath10k_bmi_execute(struct ath10k *ar, u32 address, u32 param, u32 *result);
 int ath10k_bmi_lz_stream_start(struct ath10k *ar, u32 address);
 int ath10k_bmi_lz_data(struct ath10k *ar, const void *buffer, u32 length);
 int ath10k_bmi_fast_download(struct ath10k *ar, u32 address,
index a79499c8235009f701073c83b3974d66d183e001..1e4cad8632b527346915a9b3d0545216f881580c 100644 (file)
@@ -840,35 +840,17 @@ void ath10k_ce_recv_cb_register(struct ath10k_ce_pipe *ce_state,
 
 static int ath10k_ce_init_src_ring(struct ath10k *ar,
                                   unsigned int ce_id,
-                                  struct ath10k_ce_pipe *ce_state,
                                   const struct ce_attr *attr)
 {
        struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
-       struct ath10k_ce_ring *src_ring;
-       unsigned int nentries = attr->src_nentries;
-       unsigned int ce_nbytes;
-       u32 ctrl_addr = ath10k_ce_base_address(ce_id);
-       dma_addr_t base_addr;
-       char *ptr;
-
-       nentries = roundup_pow_of_two(nentries);
-
-       if (ce_state->src_ring) {
-               WARN_ON(ce_state->src_ring->nentries != nentries);
-               return 0;
-       }
+       struct ath10k_ce_pipe *ce_state = &ar_pci->ce_states[ce_id];
+       struct ath10k_ce_ring *src_ring = ce_state->src_ring;
+       u32 nentries, ctrl_addr = ath10k_ce_base_address(ce_id);
 
-       ce_nbytes = sizeof(struct ath10k_ce_ring) + (nentries * sizeof(void *));
-       ptr = kzalloc(ce_nbytes, GFP_KERNEL);
-       if (ptr == NULL)
-               return -ENOMEM;
+       nentries = roundup_pow_of_two(attr->src_nentries);
 
-       ce_state->src_ring = (struct ath10k_ce_ring *)ptr;
-       src_ring = ce_state->src_ring;
-
-       ptr += sizeof(struct ath10k_ce_ring);
-       src_ring->nentries = nentries;
-       src_ring->nentries_mask = nentries - 1;
+       memset(src_ring->per_transfer_context, 0,
+              nentries * sizeof(*src_ring->per_transfer_context));
 
        src_ring->sw_index = ath10k_ce_src_ring_read_index_get(ar, ctrl_addr);
        src_ring->sw_index &= src_ring->nentries_mask;
@@ -878,21 +860,87 @@ static int ath10k_ce_init_src_ring(struct ath10k *ar,
                ath10k_ce_src_ring_write_index_get(ar, ctrl_addr);
        src_ring->write_index &= src_ring->nentries_mask;
 
-       src_ring->per_transfer_context = (void **)ptr;
+       ath10k_ce_src_ring_base_addr_set(ar, ctrl_addr,
+                                        src_ring->base_addr_ce_space);
+       ath10k_ce_src_ring_size_set(ar, ctrl_addr, nentries);
+       ath10k_ce_src_ring_dmax_set(ar, ctrl_addr, attr->src_sz_max);
+       ath10k_ce_src_ring_byte_swap_set(ar, ctrl_addr, 0);
+       ath10k_ce_src_ring_lowmark_set(ar, ctrl_addr, 0);
+       ath10k_ce_src_ring_highmark_set(ar, ctrl_addr, nentries);
+
+       ath10k_dbg(ATH10K_DBG_BOOT,
+                  "boot init ce src ring id %d entries %d base_addr %p\n",
+                  ce_id, nentries, src_ring->base_addr_owner_space);
+
+       return 0;
+}
+
+static int ath10k_ce_init_dest_ring(struct ath10k *ar,
+                                   unsigned int ce_id,
+                                   const struct ce_attr *attr)
+{
+       struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
+       struct ath10k_ce_pipe *ce_state = &ar_pci->ce_states[ce_id];
+       struct ath10k_ce_ring *dest_ring = ce_state->dest_ring;
+       u32 nentries, ctrl_addr = ath10k_ce_base_address(ce_id);
+
+       nentries = roundup_pow_of_two(attr->dest_nentries);
+
+       memset(dest_ring->per_transfer_context, 0,
+              nentries * sizeof(*dest_ring->per_transfer_context));
+
+       dest_ring->sw_index = ath10k_ce_dest_ring_read_index_get(ar, ctrl_addr);
+       dest_ring->sw_index &= dest_ring->nentries_mask;
+       dest_ring->write_index =
+               ath10k_ce_dest_ring_write_index_get(ar, ctrl_addr);
+       dest_ring->write_index &= dest_ring->nentries_mask;
+
+       ath10k_ce_dest_ring_base_addr_set(ar, ctrl_addr,
+                                         dest_ring->base_addr_ce_space);
+       ath10k_ce_dest_ring_size_set(ar, ctrl_addr, nentries);
+       ath10k_ce_dest_ring_byte_swap_set(ar, ctrl_addr, 0);
+       ath10k_ce_dest_ring_lowmark_set(ar, ctrl_addr, 0);
+       ath10k_ce_dest_ring_highmark_set(ar, ctrl_addr, nentries);
+
+       ath10k_dbg(ATH10K_DBG_BOOT,
+                  "boot ce dest ring id %d entries %d base_addr %p\n",
+                  ce_id, nentries, dest_ring->base_addr_owner_space);
+
+       return 0;
+}
+
+static struct ath10k_ce_ring *
+ath10k_ce_alloc_src_ring(struct ath10k *ar, unsigned int ce_id,
+                        const struct ce_attr *attr)
+{
+       struct ath10k_ce_ring *src_ring;
+       u32 nentries = attr->src_nentries;
+       dma_addr_t base_addr;
+
+       nentries = roundup_pow_of_two(nentries);
+
+       src_ring = kzalloc(sizeof(*src_ring) +
+                          (nentries *
+                           sizeof(*src_ring->per_transfer_context)),
+                          GFP_KERNEL);
+       if (src_ring == NULL)
+               return ERR_PTR(-ENOMEM);
+
+       src_ring->nentries = nentries;
+       src_ring->nentries_mask = nentries - 1;
 
        /*
         * Legacy platforms that do not support cache
         * coherent DMA are unsupported
         */
        src_ring->base_addr_owner_space_unaligned =
-               pci_alloc_consistent(ar_pci->pdev,
-                                    (nentries * sizeof(struct ce_desc) +
-                                     CE_DESC_RING_ALIGN),
-                                    &base_addr);
+               dma_alloc_coherent(ar->dev,
+                                  (nentries * sizeof(struct ce_desc) +
+                                   CE_DESC_RING_ALIGN),
+                                  &base_addr, GFP_KERNEL);
        if (!src_ring->base_addr_owner_space_unaligned) {
-               kfree(ce_state->src_ring);
-               ce_state->src_ring = NULL;
-               return -ENOMEM;
+               kfree(src_ring);
+               return ERR_PTR(-ENOMEM);
        }
 
        src_ring->base_addr_ce_space_unaligned = base_addr;
@@ -912,88 +960,54 @@ static int ath10k_ce_init_src_ring(struct ath10k *ar,
                kmalloc((nentries * sizeof(struct ce_desc) +
                         CE_DESC_RING_ALIGN), GFP_KERNEL);
        if (!src_ring->shadow_base_unaligned) {
-               pci_free_consistent(ar_pci->pdev,
-                                   (nentries * sizeof(struct ce_desc) +
-                                    CE_DESC_RING_ALIGN),
-                                   src_ring->base_addr_owner_space,
-                                   src_ring->base_addr_ce_space);
-               kfree(ce_state->src_ring);
-               ce_state->src_ring = NULL;
-               return -ENOMEM;
+               dma_free_coherent(ar->dev,
+                                 (nentries * sizeof(struct ce_desc) +
+                                  CE_DESC_RING_ALIGN),
+                                 src_ring->base_addr_owner_space,
+                                 src_ring->base_addr_ce_space);
+               kfree(src_ring);
+               return ERR_PTR(-ENOMEM);
        }
 
        src_ring->shadow_base = PTR_ALIGN(
                        src_ring->shadow_base_unaligned,
                        CE_DESC_RING_ALIGN);
 
-       ath10k_ce_src_ring_base_addr_set(ar, ctrl_addr,
-                                        src_ring->base_addr_ce_space);
-       ath10k_ce_src_ring_size_set(ar, ctrl_addr, nentries);
-       ath10k_ce_src_ring_dmax_set(ar, ctrl_addr, attr->src_sz_max);
-       ath10k_ce_src_ring_byte_swap_set(ar, ctrl_addr, 0);
-       ath10k_ce_src_ring_lowmark_set(ar, ctrl_addr, 0);
-       ath10k_ce_src_ring_highmark_set(ar, ctrl_addr, nentries);
-
-       ath10k_dbg(ATH10K_DBG_BOOT,
-                  "boot ce src ring id %d entries %d base_addr %p\n",
-                  ce_id, nentries, src_ring->base_addr_owner_space);
-
-       return 0;
+       return src_ring;
 }
 
-static int ath10k_ce_init_dest_ring(struct ath10k *ar,
-                                   unsigned int ce_id,
-                                   struct ath10k_ce_pipe *ce_state,
-                                   const struct ce_attr *attr)
+static struct ath10k_ce_ring *
+ath10k_ce_alloc_dest_ring(struct ath10k *ar, unsigned int ce_id,
+                         const struct ce_attr *attr)
 {
-       struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
        struct ath10k_ce_ring *dest_ring;
-       unsigned int nentries = attr->dest_nentries;
-       unsigned int ce_nbytes;
-       u32 ctrl_addr = ath10k_ce_base_address(ce_id);
+       u32 nentries;
        dma_addr_t base_addr;
-       char *ptr;
 
-       nentries = roundup_pow_of_two(nentries);
+       nentries = roundup_pow_of_two(attr->dest_nentries);
 
-       if (ce_state->dest_ring) {
-               WARN_ON(ce_state->dest_ring->nentries != nentries);
-               return 0;
-       }
-
-       ce_nbytes = sizeof(struct ath10k_ce_ring) + (nentries * sizeof(void *));
-       ptr = kzalloc(ce_nbytes, GFP_KERNEL);
-       if (ptr == NULL)
-               return -ENOMEM;
-
-       ce_state->dest_ring = (struct ath10k_ce_ring *)ptr;
-       dest_ring = ce_state->dest_ring;
+       dest_ring = kzalloc(sizeof(*dest_ring) +
+                           (nentries *
+                            sizeof(*dest_ring->per_transfer_context)),
+                           GFP_KERNEL);
+       if (dest_ring == NULL)
+               return ERR_PTR(-ENOMEM);
 
-       ptr += sizeof(struct ath10k_ce_ring);
        dest_ring->nentries = nentries;
        dest_ring->nentries_mask = nentries - 1;
 
-       dest_ring->sw_index = ath10k_ce_dest_ring_read_index_get(ar, ctrl_addr);
-       dest_ring->sw_index &= dest_ring->nentries_mask;
-       dest_ring->write_index =
-               ath10k_ce_dest_ring_write_index_get(ar, ctrl_addr);
-       dest_ring->write_index &= dest_ring->nentries_mask;
-
-       dest_ring->per_transfer_context = (void **)ptr;
-
        /*
         * Legacy platforms that do not support cache
         * coherent DMA are unsupported
         */
        dest_ring->base_addr_owner_space_unaligned =
-               pci_alloc_consistent(ar_pci->pdev,
-                                    (nentries * sizeof(struct ce_desc) +
-                                     CE_DESC_RING_ALIGN),
-                                    &base_addr);
+               dma_alloc_coherent(ar->dev,
+                                  (nentries * sizeof(struct ce_desc) +
+                                   CE_DESC_RING_ALIGN),
+                                  &base_addr, GFP_KERNEL);
        if (!dest_ring->base_addr_owner_space_unaligned) {
-               kfree(ce_state->dest_ring);
-               ce_state->dest_ring = NULL;
-               return -ENOMEM;
+               kfree(dest_ring);
+               return ERR_PTR(-ENOMEM);
        }
 
        dest_ring->base_addr_ce_space_unaligned = base_addr;
@@ -1012,39 +1026,7 @@ static int ath10k_ce_init_dest_ring(struct ath10k *ar,
                        dest_ring->base_addr_ce_space_unaligned,
                        CE_DESC_RING_ALIGN);
 
-       ath10k_ce_dest_ring_base_addr_set(ar, ctrl_addr,
-                                         dest_ring->base_addr_ce_space);
-       ath10k_ce_dest_ring_size_set(ar, ctrl_addr, nentries);
-       ath10k_ce_dest_ring_byte_swap_set(ar, ctrl_addr, 0);
-       ath10k_ce_dest_ring_lowmark_set(ar, ctrl_addr, 0);
-       ath10k_ce_dest_ring_highmark_set(ar, ctrl_addr, nentries);
-
-       ath10k_dbg(ATH10K_DBG_BOOT,
-                  "boot ce dest ring id %d entries %d base_addr %p\n",
-                  ce_id, nentries, dest_ring->base_addr_owner_space);
-
-       return 0;
-}
-
-static struct ath10k_ce_pipe *ath10k_ce_init_state(struct ath10k *ar,
-                                            unsigned int ce_id,
-                                            const struct ce_attr *attr)
-{
-       struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
-       struct ath10k_ce_pipe *ce_state = &ar_pci->ce_states[ce_id];
-       u32 ctrl_addr = ath10k_ce_base_address(ce_id);
-
-       spin_lock_bh(&ar_pci->ce_lock);
-
-       ce_state->ar = ar;
-       ce_state->id = ce_id;
-       ce_state->ctrl_addr = ctrl_addr;
-       ce_state->attr_flags = attr->flags;
-       ce_state->src_sz_max = attr->src_sz_max;
-
-       spin_unlock_bh(&ar_pci->ce_lock);
-
-       return ce_state;
+       return dest_ring;
 }
 
 /*
@@ -1054,11 +1036,11 @@ static struct ath10k_ce_pipe *ath10k_ce_init_state(struct ath10k *ar,
  * initialization. It may be that only one side or the other is
  * initialized by software/firmware.
  */
-struct ath10k_ce_pipe *ath10k_ce_init(struct ath10k *ar,
-                               unsigned int ce_id,
-                               const struct ce_attr *attr)
+int ath10k_ce_init_pipe(struct ath10k *ar, unsigned int ce_id,
+                       const struct ce_attr *attr)
 {
-       struct ath10k_ce_pipe *ce_state;
+       struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
+       struct ath10k_ce_pipe *ce_state = &ar_pci->ce_states[ce_id];
        int ret;
 
        /*
@@ -1074,64 +1056,128 @@ struct ath10k_ce_pipe *ath10k_ce_init(struct ath10k *ar,
 
        ret = ath10k_pci_wake(ar);
        if (ret)
-               return NULL;
+               return ret;
 
-       ce_state = ath10k_ce_init_state(ar, ce_id, attr);
-       if (!ce_state) {
-               ath10k_err("Failed to initialize CE state for ID: %d\n", ce_id);
-               goto out;
-       }
+       spin_lock_bh(&ar_pci->ce_lock);
+       ce_state->ar = ar;
+       ce_state->id = ce_id;
+       ce_state->ctrl_addr = ath10k_ce_base_address(ce_id);
+       ce_state->attr_flags = attr->flags;
+       ce_state->src_sz_max = attr->src_sz_max;
+       spin_unlock_bh(&ar_pci->ce_lock);
 
        if (attr->src_nentries) {
-               ret = ath10k_ce_init_src_ring(ar, ce_id, ce_state, attr);
+               ret = ath10k_ce_init_src_ring(ar, ce_id, attr);
                if (ret) {
                        ath10k_err("Failed to initialize CE src ring for ID: %d (%d)\n",
                                   ce_id, ret);
-                       ath10k_ce_deinit(ce_state);
-                       ce_state = NULL;
                        goto out;
                }
        }
 
        if (attr->dest_nentries) {
-               ret = ath10k_ce_init_dest_ring(ar, ce_id, ce_state, attr);
+               ret = ath10k_ce_init_dest_ring(ar, ce_id, attr);
                if (ret) {
                        ath10k_err("Failed to initialize CE dest ring for ID: %d (%d)\n",
                                   ce_id, ret);
-                       ath10k_ce_deinit(ce_state);
-                       ce_state = NULL;
                        goto out;
                }
        }
 
 out:
        ath10k_pci_sleep(ar);
-       return ce_state;
+       return ret;
 }
 
-void ath10k_ce_deinit(struct ath10k_ce_pipe *ce_state)
+static void ath10k_ce_deinit_src_ring(struct ath10k *ar, unsigned int ce_id)
+{
+       u32 ctrl_addr = ath10k_ce_base_address(ce_id);
+
+       ath10k_ce_src_ring_base_addr_set(ar, ctrl_addr, 0);
+       ath10k_ce_src_ring_size_set(ar, ctrl_addr, 0);
+       ath10k_ce_src_ring_dmax_set(ar, ctrl_addr, 0);
+       ath10k_ce_src_ring_highmark_set(ar, ctrl_addr, 0);
+}
+
+static void ath10k_ce_deinit_dest_ring(struct ath10k *ar, unsigned int ce_id)
+{
+       u32 ctrl_addr = ath10k_ce_base_address(ce_id);
+
+       ath10k_ce_dest_ring_base_addr_set(ar, ctrl_addr, 0);
+       ath10k_ce_dest_ring_size_set(ar, ctrl_addr, 0);
+       ath10k_ce_dest_ring_highmark_set(ar, ctrl_addr, 0);
+}
+
+void ath10k_ce_deinit_pipe(struct ath10k *ar, unsigned int ce_id)
+{
+       int ret;
+
+       ret = ath10k_pci_wake(ar);
+       if (ret)
+               return;
+
+       ath10k_ce_deinit_src_ring(ar, ce_id);
+       ath10k_ce_deinit_dest_ring(ar, ce_id);
+
+       ath10k_pci_sleep(ar);
+}
+
+int ath10k_ce_alloc_pipe(struct ath10k *ar, int ce_id,
+                        const struct ce_attr *attr)
+{
+       struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
+       struct ath10k_ce_pipe *ce_state = &ar_pci->ce_states[ce_id];
+       int ret;
+
+       if (attr->src_nentries) {
+               ce_state->src_ring = ath10k_ce_alloc_src_ring(ar, ce_id, attr);
+               if (IS_ERR(ce_state->src_ring)) {
+                       ret = PTR_ERR(ce_state->src_ring);
+                       ath10k_err("failed to allocate copy engine source ring %d: %d\n",
+                                  ce_id, ret);
+                       ce_state->src_ring = NULL;
+                       return ret;
+               }
+       }
+
+       if (attr->dest_nentries) {
+               ce_state->dest_ring = ath10k_ce_alloc_dest_ring(ar, ce_id,
+                                                               attr);
+               if (IS_ERR(ce_state->dest_ring)) {
+                       ret = PTR_ERR(ce_state->dest_ring);
+                       ath10k_err("failed to allocate copy engine destination ring %d: %d\n",
+                                  ce_id, ret);
+                       ce_state->dest_ring = NULL;
+                       return ret;
+               }
+       }
+
+       return 0;
+}
+
+void ath10k_ce_free_pipe(struct ath10k *ar, int ce_id)
 {
-       struct ath10k *ar = ce_state->ar;
        struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
+       struct ath10k_ce_pipe *ce_state = &ar_pci->ce_states[ce_id];
 
        if (ce_state->src_ring) {
                kfree(ce_state->src_ring->shadow_base_unaligned);
-               pci_free_consistent(ar_pci->pdev,
-                                   (ce_state->src_ring->nentries *
-                                    sizeof(struct ce_desc) +
-                                    CE_DESC_RING_ALIGN),
-                                   ce_state->src_ring->base_addr_owner_space,
-                                   ce_state->src_ring->base_addr_ce_space);
+               dma_free_coherent(ar->dev,
+                                 (ce_state->src_ring->nentries *
+                                  sizeof(struct ce_desc) +
+                                  CE_DESC_RING_ALIGN),
+                                 ce_state->src_ring->base_addr_owner_space,
+                                 ce_state->src_ring->base_addr_ce_space);
                kfree(ce_state->src_ring);
        }
 
        if (ce_state->dest_ring) {
-               pci_free_consistent(ar_pci->pdev,
-                                   (ce_state->dest_ring->nentries *
-                                    sizeof(struct ce_desc) +
-                                    CE_DESC_RING_ALIGN),
-                                   ce_state->dest_ring->base_addr_owner_space,
-                                   ce_state->dest_ring->base_addr_ce_space);
+               dma_free_coherent(ar->dev,
+                                 (ce_state->dest_ring->nentries *
+                                  sizeof(struct ce_desc) +
+                                  CE_DESC_RING_ALIGN),
+                                 ce_state->dest_ring->base_addr_owner_space,
+                                 ce_state->dest_ring->base_addr_ce_space);
                kfree(ce_state->dest_ring);
        }
 
index 8eb7f99ed992277b0efb3e7ae4f971b8e4eb7557..fd0bc3561e42a9ea4d84644524b9f19b4cdfb207 100644 (file)
@@ -104,7 +104,8 @@ struct ath10k_ce_ring {
        void *shadow_base_unaligned;
        struct ce_desc *shadow_base;
 
-       void **per_transfer_context;
+       /* keep last */
+       void *per_transfer_context[0];
 };
 
 struct ath10k_ce_pipe {
@@ -210,10 +211,12 @@ int ath10k_ce_completed_send_next(struct ath10k_ce_pipe *ce_state,
 
 /*==================CE Engine Initialization=======================*/
 
-/* Initialize an instance of a CE */
-struct ath10k_ce_pipe *ath10k_ce_init(struct ath10k *ar,
-                               unsigned int ce_id,
-                               const struct ce_attr *attr);
+int ath10k_ce_init_pipe(struct ath10k *ar, unsigned int ce_id,
+                       const struct ce_attr *attr);
+void ath10k_ce_deinit_pipe(struct ath10k *ar, unsigned int ce_id);
+int ath10k_ce_alloc_pipe(struct ath10k *ar, int ce_id,
+                         const struct ce_attr *attr);
+void ath10k_ce_free_pipe(struct ath10k *ar, int ce_id);
 
 /*==================CE Engine Shutdown=======================*/
 /*
@@ -236,8 +239,6 @@ int ath10k_ce_cancel_send_next(struct ath10k_ce_pipe *ce_state,
                               unsigned int *nbytesp,
                               unsigned int *transfer_idp);
 
-void ath10k_ce_deinit(struct ath10k_ce_pipe *ce_state);
-
 /*==================CE Interrupt Handlers====================*/
 void ath10k_ce_per_engine_service_any(struct ath10k *ar);
 void ath10k_ce_per_engine_service(struct ath10k *ar, unsigned int ce_id);
index ebc5fc2ede75cbac75da3a2f789a946d4e1274ad..6abde37fb339c981f2c772af9ef908f5a32a66be 100644 (file)
@@ -249,30 +249,40 @@ exit:
 
 static int ath10k_download_and_run_otp(struct ath10k *ar)
 {
-       u32 address = ar->hw_params.patch_load_addr;
-       u32 exec_param;
+       u32 result, address = ar->hw_params.patch_load_addr;
        int ret;
 
        /* OTP is optional */
 
-       if (!ar->otp_data || !ar->otp_len)
+       if (!ar->otp_data || !ar->otp_len) {
+               ath10k_warn("Not running otp, calibration will be incorrect (otp-data %p otp_len %zd)!\n",
+                           ar->otp_data, ar->otp_len);
                return 0;
+       }
+
+       ath10k_dbg(ATH10K_DBG_BOOT, "boot upload otp to 0x%x len %zd\n",
+                  address, ar->otp_len);
 
        ret = ath10k_bmi_fast_download(ar, address, ar->otp_data, ar->otp_len);
        if (ret) {
                ath10k_err("could not write otp (%d)\n", ret);
-               goto exit;
+               return ret;
        }
 
-       exec_param = 0;
-       ret = ath10k_bmi_execute(ar, address, &exec_param);
+       ret = ath10k_bmi_execute(ar, address, 0, &result);
        if (ret) {
                ath10k_err("could not execute otp (%d)\n", ret);
-               goto exit;
+               return ret;
        }
 
-exit:
-       return ret;
+       ath10k_dbg(ATH10K_DBG_BOOT, "boot otp execute result %d\n", result);
+
+       if (result != 0) {
+               ath10k_err("otp calibration failed: %d", result);
+               return -EINVAL;
+       }
+
+       return 0;
 }
 
 static int ath10k_download_fw(struct ath10k *ar)
@@ -389,8 +399,8 @@ static int ath10k_core_fetch_firmware_api_n(struct ath10k *ar, const char *name)
        /* first fetch the firmware file (firmware-*.bin) */
        ar->firmware = ath10k_fetch_fw_file(ar, ar->hw_params.fw.dir, name);
        if (IS_ERR(ar->firmware)) {
-               ath10k_err("Could not fetch firmware file '%s': %ld\n",
-                          name, PTR_ERR(ar->firmware));
+               ath10k_err("could not fetch firmware file '%s/%s': %ld\n",
+                          ar->hw_params.fw.dir, name, PTR_ERR(ar->firmware));
                return PTR_ERR(ar->firmware);
        }
 
@@ -401,14 +411,14 @@ static int ath10k_core_fetch_firmware_api_n(struct ath10k *ar, const char *name)
        magic_len = strlen(ATH10K_FIRMWARE_MAGIC) + 1;
 
        if (len < magic_len) {
-               ath10k_err("firmware image too small to contain magic: %zu\n",
-                          len);
+               ath10k_err("firmware file '%s/%s' too small to contain magic: %zu\n",
+                          ar->hw_params.fw.dir, name, len);
                ret = -EINVAL;
                goto err;
        }
 
        if (memcmp(data, ATH10K_FIRMWARE_MAGIC, magic_len) != 0) {
-               ath10k_err("Invalid firmware magic\n");
+               ath10k_err("invalid firmware magic\n");
                ret = -EINVAL;
                goto err;
        }
@@ -430,7 +440,7 @@ static int ath10k_core_fetch_firmware_api_n(struct ath10k *ar, const char *name)
                data += sizeof(*hdr);
 
                if (len < ie_len) {
-                       ath10k_err("Invalid length for FW IE %d (%zu < %zu)\n",
+                       ath10k_err("invalid length for FW IE %d (%zu < %zu)\n",
                                   ie_id, len, ie_len);
                        ret = -EINVAL;
                        goto err;
@@ -513,8 +523,8 @@ static int ath10k_core_fetch_firmware_api_n(struct ath10k *ar, const char *name)
        }
 
        if (!ar->firmware_data || !ar->firmware_len) {
-               ath10k_warn("No ATH10K_FW_IE_FW_IMAGE found from %s, skipping\n",
-                           name);
+               ath10k_warn("No ATH10K_FW_IE_FW_IMAGE found from '%s/%s', skipping\n",
+                           ar->hw_params.fw.dir, name);
                ret = -ENOMEDIUM;
                goto err;
        }
@@ -531,7 +541,9 @@ static int ath10k_core_fetch_firmware_api_n(struct ath10k *ar, const char *name)
                                         ar->hw_params.fw.board);
        if (IS_ERR(ar->board)) {
                ret = PTR_ERR(ar->board);
-               ath10k_err("could not fetch board data (%d)\n", ret);
+               ath10k_err("could not fetch board data '%s/%s' (%d)\n",
+                          ar->hw_params.fw.dir, ar->hw_params.fw.board,
+                          ret);
                goto err;
        }
 
@@ -549,19 +561,21 @@ static int ath10k_core_fetch_firmware_files(struct ath10k *ar)
 {
        int ret;
 
+       ar->fw_api = 2;
+       ath10k_dbg(ATH10K_DBG_BOOT, "trying fw api %d\n", ar->fw_api);
+
        ret = ath10k_core_fetch_firmware_api_n(ar, ATH10K_FW_API2_FILE);
-       if (ret == 0) {
-               ar->fw_api = 2;
-               goto out;
-       }
+       if (ret == 0)
+               goto success;
+
+       ar->fw_api = 1;
+       ath10k_dbg(ATH10K_DBG_BOOT, "trying fw api %d\n", ar->fw_api);
 
        ret = ath10k_core_fetch_firmware_api_1(ar);
        if (ret)
                return ret;
 
-       ar->fw_api = 1;
-
-out:
+success:
        ath10k_dbg(ATH10K_DBG_BOOT, "using fw api %d\n", ar->fw_api);
 
        return 0;
@@ -572,16 +586,22 @@ static int ath10k_init_download_firmware(struct ath10k *ar)
        int ret;
 
        ret = ath10k_download_board_data(ar);
-       if (ret)
+       if (ret) {
+               ath10k_err("failed to download board data: %d\n", ret);
                return ret;
+       }
 
        ret = ath10k_download_and_run_otp(ar);
-       if (ret)
+       if (ret) {
+               ath10k_err("failed to run otp: %d\n", ret);
                return ret;
+       }
 
        ret = ath10k_download_fw(ar);
-       if (ret)
+       if (ret) {
+               ath10k_err("failed to download firmware: %d\n", ret);
                return ret;
+       }
 
        return ret;
 }
@@ -835,9 +855,12 @@ int ath10k_core_start(struct ath10k *ar)
        INIT_LIST_HEAD(&ar->arvifs);
 
        if (!test_bit(ATH10K_FLAG_FIRST_BOOT_DONE, &ar->dev_flags))
-               ath10k_info("%s (0x%x) fw %s api %d htt %d.%d\n",
-                           ar->hw_params.name, ar->target_version,
-                           ar->hw->wiphy->fw_version, ar->fw_api,
+               ath10k_info("%s (0x%08x, 0x%08x) fw %s api %d htt %d.%d\n",
+                           ar->hw_params.name,
+                           ar->target_version,
+                           ar->chip_id,
+                           ar->hw->wiphy->fw_version,
+                           ar->fw_api,
                            ar->htt.target_version_major,
                            ar->htt.target_version_minor);
 
index 0e71979d837cf90888c74e4d85035c2a4d6fd4ef..2c1dfd71914688ec80bb4849031458943c986293 100644 (file)
@@ -119,6 +119,7 @@ struct ath10k_peer_stat {
        u8 peer_macaddr[ETH_ALEN];
        u32 peer_rssi;
        u32 peer_tx_rate;
+       u32 peer_rx_rate; /* 10x only */
 };
 
 struct ath10k_target_stats {
@@ -130,6 +131,12 @@ struct ath10k_target_stats {
        u32 cycle_count;
        u32 phy_err_count;
        u32 chan_tx_power;
+       u32 ack_rx_bad;
+       u32 rts_bad;
+       u32 rts_good;
+       u32 fcs_bad;
+       u32 no_beacons;
+       u32 mib_int_count;
 
        /* PDEV TX stats */
        s32 comp_queued;
@@ -260,6 +267,8 @@ struct ath10k_vif {
        u8 fixed_rate;
        u8 fixed_nss;
        u8 force_sgi;
+       bool use_cts_prot;
+       int num_legacy_stations;
 };
 
 struct ath10k_vif_iter {
@@ -419,13 +428,18 @@ struct ath10k {
        struct cfg80211_chan_def chandef;
 
        int free_vdev_map;
+       bool promisc;
+       bool monitor;
        int monitor_vdev_id;
-       bool monitor_enabled;
-       bool monitor_present;
+       bool monitor_started;
        unsigned int filter_flags;
        unsigned long dev_flags;
        u32 dfs_block_radar_events;
 
+       /* protected by conf_mutex */
+       bool radar_enabled;
+       int num_started_vdevs;
+
        struct wmi_pdev_set_wmm_params_arg wmm_params;
        struct completion install_key_done;
 
index 6debd281350aeb840978606212655fba6d6fb7a3..1b7ff4ba122ce42af61265eae30d8fb98d97201e 100644 (file)
@@ -161,7 +161,7 @@ void ath10k_debug_read_target_stats(struct ath10k *ar,
        u8 *tmp = ev->data;
        struct ath10k_target_stats *stats;
        int num_pdev_stats, num_vdev_stats, num_peer_stats;
-       struct wmi_pdev_stats *ps;
+       struct wmi_pdev_stats_10x *ps;
        int i;
 
        spin_lock_bh(&ar->data_lock);
@@ -173,7 +173,7 @@ void ath10k_debug_read_target_stats(struct ath10k *ar,
        num_peer_stats = __le32_to_cpu(ev->num_peer_stats); /* 0 or max peers */
 
        if (num_pdev_stats) {
-               ps = (struct wmi_pdev_stats *)tmp;
+               ps = (struct wmi_pdev_stats_10x *)tmp;
 
                stats->ch_noise_floor = __le32_to_cpu(ps->chan_nf);
                stats->tx_frame_count = __le32_to_cpu(ps->tx_frame_count);
@@ -228,7 +228,18 @@ void ath10k_debug_read_target_stats(struct ath10k *ar,
                stats->phy_err_drop = __le32_to_cpu(ps->wal.rx.phy_err_drop);
                stats->mpdu_errs = __le32_to_cpu(ps->wal.rx.mpdu_errs);
 
-               tmp += sizeof(struct wmi_pdev_stats);
+               if (test_bit(ATH10K_FW_FEATURE_WMI_10X,
+                            ar->fw_features)) {
+                       stats->ack_rx_bad = __le32_to_cpu(ps->ack_rx_bad);
+                       stats->rts_bad = __le32_to_cpu(ps->rts_bad);
+                       stats->rts_good = __le32_to_cpu(ps->rts_good);
+                       stats->fcs_bad = __le32_to_cpu(ps->fcs_bad);
+                       stats->no_beacons = __le32_to_cpu(ps->no_beacons);
+                       stats->mib_int_count = __le32_to_cpu(ps->mib_int_count);
+                       tmp += sizeof(struct wmi_pdev_stats_10x);
+               } else {
+                       tmp += sizeof(struct wmi_pdev_stats_old);
+               }
        }
 
        /* 0 or max vdevs */
@@ -243,22 +254,29 @@ void ath10k_debug_read_target_stats(struct ath10k *ar,
        }
 
        if (num_peer_stats) {
-               struct wmi_peer_stats *peer_stats;
+               struct wmi_peer_stats_10x *peer_stats;
                struct ath10k_peer_stat *s;
 
                stats->peers = num_peer_stats;
 
                for (i = 0; i < num_peer_stats; i++) {
-                       peer_stats = (struct wmi_peer_stats *)tmp;
+                       peer_stats = (struct wmi_peer_stats_10x *)tmp;
                        s = &stats->peer_stat[i];
 
-                       WMI_MAC_ADDR_TO_CHAR_ARRAY(&peer_stats->peer_macaddr,
-                                                  s->peer_macaddr);
+                       memcpy(s->peer_macaddr, &peer_stats->peer_macaddr.addr,
+                              ETH_ALEN);
                        s->peer_rssi = __le32_to_cpu(peer_stats->peer_rssi);
                        s->peer_tx_rate =
                                __le32_to_cpu(peer_stats->peer_tx_rate);
-
-                       tmp += sizeof(struct wmi_peer_stats);
+                       if (test_bit(ATH10K_FW_FEATURE_WMI_10X,
+                                    ar->fw_features)) {
+                               s->peer_rx_rate =
+                                       __le32_to_cpu(peer_stats->peer_rx_rate);
+                               tmp += sizeof(struct wmi_peer_stats_10x);
+
+                       } else {
+                               tmp += sizeof(struct wmi_peer_stats_old);
+                       }
                }
        }
 
@@ -272,7 +290,7 @@ static ssize_t ath10k_read_fw_stats(struct file *file, char __user *user_buf,
        struct ath10k *ar = file->private_data;
        struct ath10k_target_stats *fw_stats;
        char *buf = NULL;
-       unsigned int len = 0, buf_len = 2500;
+       unsigned int len = 0, buf_len = 8000;
        ssize_t ret_cnt = 0;
        long left;
        int i;
@@ -320,6 +338,16 @@ static ssize_t ath10k_read_fw_stats(struct file *file, char __user *user_buf,
                         "Cycle count", fw_stats->cycle_count);
        len += scnprintf(buf + len, buf_len - len, "%30s %10u\n",
                         "PHY error count", fw_stats->phy_err_count);
+       len += scnprintf(buf + len, buf_len - len, "%30s %10u\n",
+                        "RTS bad count", fw_stats->rts_bad);
+       len += scnprintf(buf + len, buf_len - len, "%30s %10u\n",
+                        "RTS good count", fw_stats->rts_good);
+       len += scnprintf(buf + len, buf_len - len, "%30s %10u\n",
+                        "FCS bad count", fw_stats->fcs_bad);
+       len += scnprintf(buf + len, buf_len - len, "%30s %10u\n",
+                        "No beacon count", fw_stats->no_beacons);
+       len += scnprintf(buf + len, buf_len - len, "%30s %10u\n",
+                        "MIB int count", fw_stats->mib_int_count);
 
        len += scnprintf(buf + len, buf_len - len, "\n");
        len += scnprintf(buf + len, buf_len - len, "%30s\n",
@@ -411,8 +439,8 @@ static ssize_t ath10k_read_fw_stats(struct file *file, char __user *user_buf,
                         "MPDU errors (FCS, MIC, ENC)", fw_stats->mpdu_errs);
 
        len += scnprintf(buf + len, buf_len - len, "\n");
-       len += scnprintf(buf + len, buf_len - len, "%30s\n",
-                        "ath10k PEER stats");
+       len += scnprintf(buf + len, buf_len - len, "%30s (%d)\n",
+                        "ath10k PEER stats", fw_stats->peers);
        len += scnprintf(buf + len, buf_len - len, "%30s\n\n",
                                 "=================");
 
@@ -425,6 +453,9 @@ static ssize_t ath10k_read_fw_stats(struct file *file, char __user *user_buf,
                len += scnprintf(buf + len, buf_len - len, "%30s %u\n",
                                 "Peer TX rate",
                                 fw_stats->peer_stat[i].peer_tx_rate);
+               len += scnprintf(buf + len, buf_len - len, "%30s %u\n",
+                                "Peer RX rate",
+                                fw_stats->peer_stat[i].peer_rx_rate);
                len += scnprintf(buf + len, buf_len - len, "\n");
        }
        spin_unlock_bh(&ar->data_lock);
@@ -451,27 +482,37 @@ static ssize_t ath10k_read_simulate_fw_crash(struct file *file,
                                             char __user *user_buf,
                                             size_t count, loff_t *ppos)
 {
-       const char buf[] = "To simulate firmware crash write the keyword"
-                          " `crash` to this file.\nThis will force firmware"
-                          " to report a crash to the host system.\n";
+       const char buf[] = "To simulate firmware crash write one of the"
+                          " keywords to this file:\n `soft` - this will send"
+                          " WMI_FORCE_FW_HANG_ASSERT to firmware if FW"
+                          " supports that command.\n `hard` - this will send"
+                          " to firmware command with illegal parameters"
+                          " causing firmware crash.\n";
+
        return simple_read_from_buffer(user_buf, count, ppos, buf, strlen(buf));
 }
 
+/* Simulate firmware crash:
+ * 'soft': Call wmi command causing firmware hang. This firmware hang is
+ * recoverable by warm firmware reset.
+ * 'hard': Force firmware crash by setting any vdev parameter for not allowed
+ * vdev id. This is hard firmware crash because it is recoverable only by cold
+ * firmware reset.
+ */
 static ssize_t ath10k_write_simulate_fw_crash(struct file *file,
                                              const char __user *user_buf,
                                              size_t count, loff_t *ppos)
 {
        struct ath10k *ar = file->private_data;
-       char buf[32] = {};
+       char buf[32];
        int ret;
 
        mutex_lock(&ar->conf_mutex);
 
        simple_write_to_buffer(buf, sizeof(buf) - 1, ppos, user_buf, count);
-       if (strcmp(buf, "crash") && strcmp(buf, "crash\n")) {
-               ret = -EINVAL;
-               goto exit;
-       }
+
+       /* make sure that buf is null terminated */
+       buf[sizeof(buf) - 1] = 0;
 
        if (ar->state != ATH10K_STATE_ON &&
            ar->state != ATH10K_STATE_RESTARTED) {
@@ -479,14 +520,30 @@ static ssize_t ath10k_write_simulate_fw_crash(struct file *file,
                goto exit;
        }
 
-       ath10k_info("simulating firmware crash\n");
+       /* drop the possible '\n' from the end */
+       if (buf[count - 1] == '\n') {
+               buf[count - 1] = 0;
+               count--;
+       }
 
-       ret = ath10k_wmi_force_fw_hang(ar, WMI_FORCE_FW_HANG_ASSERT, 0);
-       if (ret)
-               ath10k_warn("failed to force fw hang (%d)\n", ret);
+       if (!strcmp(buf, "soft")) {
+               ath10k_info("simulating soft firmware crash\n");
+               ret = ath10k_wmi_force_fw_hang(ar, WMI_FORCE_FW_HANG_ASSERT, 0);
+       } else if (!strcmp(buf, "hard")) {
+               ath10k_info("simulating hard firmware crash\n");
+               ret = ath10k_wmi_vdev_set_param(ar, TARGET_NUM_VDEVS + 1,
+                                       ar->wmi.vdev_param->rts_threshold, 0);
+       } else {
+               ret = -EINVAL;
+               goto exit;
+       }
+
+       if (ret) {
+               ath10k_warn("failed to simulate firmware crash: %d\n", ret);
+               goto exit;
+       }
 
-       if (ret == 0)
-               ret = count;
+       ret = count;
 
 exit:
        mutex_unlock(&ar->conf_mutex);
index 7f1bccd3597f1bb2a3b40d5a482f308e99e9e27c..5b58dbb174161a5ead5a8a5865fac70ba7e1b056 100644 (file)
@@ -157,6 +157,9 @@ int ath10k_htc_send(struct ath10k_htc *htc,
                        goto err_pull;
                }
                ep->tx_credits -= credits;
+               ath10k_dbg(ATH10K_DBG_HTC,
+                          "htc ep %d consumed %d credits (total %d)\n",
+                          eid, credits, ep->tx_credits);
                spin_unlock_bh(&htc->tx_lock);
        }
 
@@ -185,6 +188,9 @@ err_credits:
        if (ep->tx_credit_flow_enabled) {
                spin_lock_bh(&htc->tx_lock);
                ep->tx_credits += credits;
+               ath10k_dbg(ATH10K_DBG_HTC,
+                          "htc ep %d reverted %d credits back (total %d)\n",
+                          eid, credits, ep->tx_credits);
                spin_unlock_bh(&htc->tx_lock);
 
                if (ep->ep_ops.ep_tx_credits)
@@ -234,12 +240,12 @@ ath10k_htc_process_credit_report(struct ath10k_htc *htc,
                if (report->eid >= ATH10K_HTC_EP_COUNT)
                        break;
 
-               ath10k_dbg(ATH10K_DBG_HTC, "ep %d got %d credits\n",
-                          report->eid, report->credits);
-
                ep = &htc->endpoint[report->eid];
                ep->tx_credits += report->credits;
 
+               ath10k_dbg(ATH10K_DBG_HTC, "htc ep %d got %d credits (total %d)\n",
+                          report->eid, report->credits, ep->tx_credits);
+
                if (ep->ep_ops.ep_tx_credits) {
                        spin_unlock_bh(&htc->tx_lock);
                        ep->ep_ops.ep_tx_credits(htc->ar);
index 654867fc1ae73bbd7a13cf4dc61f8ac89a0b7823..645a563e3fb9675a5c545277cec88b68a757fff0 100644 (file)
@@ -21,6 +21,7 @@
 #include <linux/bug.h>
 #include <linux/interrupt.h>
 #include <linux/dmapool.h>
+#include <net/mac80211.h>
 
 #include "htc.h"
 #include "rx_desc.h"
@@ -1172,23 +1173,6 @@ struct htt_peer_unmap_event {
        u16 peer_id;
 };
 
-struct htt_rx_info {
-       struct sk_buff *skb;
-       enum htt_rx_mpdu_status status;
-       enum htt_rx_mpdu_encrypt_type encrypt_type;
-       s8 signal;
-       struct {
-               u8 info0;
-               u32 info1;
-               u32 info2;
-       } rate;
-
-       u32 tsf;
-       bool fcs_err;
-       bool amsdu_more;
-       bool mic_err;
-};
-
 struct ath10k_htt_txbuf {
        struct htt_data_tx_desc_frag frags[2];
        struct ath10k_htc_hdr htc_hdr;
@@ -1289,6 +1273,9 @@ struct ath10k_htt {
        struct tasklet_struct txrx_compl_task;
        struct sk_buff_head tx_compl_q;
        struct sk_buff_head rx_compl_q;
+
+       /* rx_status template */
+       struct ieee80211_rx_status rx_status;
 };
 
 #define RX_HTT_HDR_STATUS_LEN 64
index cdcbe2de95f97d602cb086c301f0778aad5bc49c..f85a3cf6da3103d6f909b80715a1d0763fce7a39 100644 (file)
@@ -297,6 +297,7 @@ static void ath10k_htt_rx_free_msdu_chain(struct sk_buff *skb)
        }
 }
 
+/* return: < 0 fatal error, 0 - non chained msdu, 1 chained msdu */
 static int ath10k_htt_rx_amsdu_pop(struct ath10k_htt *htt,
                                   u8 **fw_desc, int *fw_desc_len,
                                   struct sk_buff **head_msdu,
@@ -310,7 +311,7 @@ static int ath10k_htt_rx_amsdu_pop(struct ath10k_htt *htt,
 
        if (htt->rx_confused) {
                ath10k_warn("htt is confused. refusing rx\n");
-               return 0;
+               return -1;
        }
 
        msdu = *head_msdu = ath10k_htt_rx_netbuf_pop(htt);
@@ -442,6 +443,9 @@ static int ath10k_htt_rx_amsdu_pop(struct ath10k_htt *htt,
        }
        *tail_msdu = msdu;
 
+       if (*head_msdu == NULL)
+               msdu_chaining = -1;
+
        /*
         * Don't refill the ring yet.
         *
@@ -636,6 +640,190 @@ struct amsdu_subframe_hdr {
        __be16 len;
 } __packed;
 
+static const u8 rx_legacy_rate_idx[] = {
+       3,      /* 0x00  - 11Mbps  */
+       2,      /* 0x01  - 5.5Mbps */
+       1,      /* 0x02  - 2Mbps   */
+       0,      /* 0x03  - 1Mbps   */
+       3,      /* 0x04  - 11Mbps  */
+       2,      /* 0x05  - 5.5Mbps */
+       1,      /* 0x06  - 2Mbps   */
+       0,      /* 0x07  - 1Mbps   */
+       10,     /* 0x08  - 48Mbps  */
+       8,      /* 0x09  - 24Mbps  */
+       6,      /* 0x0A  - 12Mbps  */
+       4,      /* 0x0B  - 6Mbps   */
+       11,     /* 0x0C  - 54Mbps  */
+       9,      /* 0x0D  - 36Mbps  */
+       7,      /* 0x0E  - 18Mbps  */
+       5,      /* 0x0F  - 9Mbps   */
+};
+
+static void ath10k_htt_rx_h_rates(struct ath10k *ar,
+                                 enum ieee80211_band band,
+                                 u8 info0, u32 info1, u32 info2,
+                                 struct ieee80211_rx_status *status)
+{
+       u8 cck, rate, rate_idx, bw, sgi, mcs, nss;
+       u8 preamble = 0;
+
+       /* Check if valid fields */
+       if (!(info0 & HTT_RX_INDICATION_INFO0_START_VALID))
+               return;
+
+       preamble = MS(info1, HTT_RX_INDICATION_INFO1_PREAMBLE_TYPE);
+
+       switch (preamble) {
+       case HTT_RX_LEGACY:
+               cck = info0 & HTT_RX_INDICATION_INFO0_LEGACY_RATE_CCK;
+               rate = MS(info0, HTT_RX_INDICATION_INFO0_LEGACY_RATE);
+               rate_idx = 0;
+
+               if (rate < 0x08 || rate > 0x0F)
+                       break;
+
+               switch (band) {
+               case IEEE80211_BAND_2GHZ:
+                       if (cck)
+                               rate &= ~BIT(3);
+                       rate_idx = rx_legacy_rate_idx[rate];
+                       break;
+               case IEEE80211_BAND_5GHZ:
+                       rate_idx = rx_legacy_rate_idx[rate];
+                       /* We are using same rate table registering
+                          HW - ath10k_rates[]. In case of 5GHz skip
+                          CCK rates, so -4 here */
+                       rate_idx -= 4;
+                       break;
+               default:
+                       break;
+               }
+
+               status->rate_idx = rate_idx;
+               break;
+       case HTT_RX_HT:
+       case HTT_RX_HT_WITH_TXBF:
+               /* HT-SIG - Table 20-11 in info1 and info2 */
+               mcs = info1 & 0x1F;
+               nss = mcs >> 3;
+               bw = (info1 >> 7) & 1;
+               sgi = (info2 >> 7) & 1;
+
+               status->rate_idx = mcs;
+               status->flag |= RX_FLAG_HT;
+               if (sgi)
+                       status->flag |= RX_FLAG_SHORT_GI;
+               if (bw)
+                       status->flag |= RX_FLAG_40MHZ;
+               break;
+       case HTT_RX_VHT:
+       case HTT_RX_VHT_WITH_TXBF:
+               /* VHT-SIG-A1 in info 1, VHT-SIG-A2 in info2
+                  TODO check this */
+               mcs = (info2 >> 4) & 0x0F;
+               nss = ((info1 >> 10) & 0x07) + 1;
+               bw = info1 & 3;
+               sgi = info2 & 1;
+
+               status->rate_idx = mcs;
+               status->vht_nss = nss;
+
+               if (sgi)
+                       status->flag |= RX_FLAG_SHORT_GI;
+
+               switch (bw) {
+               /* 20MHZ */
+               case 0:
+                       break;
+               /* 40MHZ */
+               case 1:
+                       status->flag |= RX_FLAG_40MHZ;
+                       break;
+               /* 80MHZ */
+               case 2:
+                       status->vht_flag |= RX_VHT_FLAG_80MHZ;
+               }
+
+               status->flag |= RX_FLAG_VHT;
+               break;
+       default:
+               break;
+       }
+}
+
+static void ath10k_htt_rx_h_protected(struct ath10k_htt *htt,
+                                     struct ieee80211_rx_status *rx_status,
+                                     struct sk_buff *skb,
+                                     enum htt_rx_mpdu_encrypt_type enctype)
+{
+       struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
+
+
+       if (enctype == HTT_RX_MPDU_ENCRYPT_NONE) {
+               rx_status->flag &= ~(RX_FLAG_DECRYPTED |
+                                    RX_FLAG_IV_STRIPPED |
+                                    RX_FLAG_MMIC_STRIPPED);
+               return;
+       }
+
+       rx_status->flag |= RX_FLAG_DECRYPTED |
+                          RX_FLAG_IV_STRIPPED |
+                          RX_FLAG_MMIC_STRIPPED;
+       hdr->frame_control = __cpu_to_le16(__le16_to_cpu(hdr->frame_control) &
+                                          ~IEEE80211_FCTL_PROTECTED);
+}
+
+static bool ath10k_htt_rx_h_channel(struct ath10k *ar,
+                                   struct ieee80211_rx_status *status)
+{
+       struct ieee80211_channel *ch;
+
+       spin_lock_bh(&ar->data_lock);
+       ch = ar->scan_channel;
+       if (!ch)
+               ch = ar->rx_channel;
+       spin_unlock_bh(&ar->data_lock);
+
+       if (!ch)
+               return false;
+
+       status->band = ch->band;
+       status->freq = ch->center_freq;
+
+       return true;
+}
+
+static void ath10k_process_rx(struct ath10k *ar,
+                             struct ieee80211_rx_status *rx_status,
+                             struct sk_buff *skb)
+{
+       struct ieee80211_rx_status *status;
+
+       status = IEEE80211_SKB_RXCB(skb);
+       *status = *rx_status;
+
+       ath10k_dbg(ATH10K_DBG_DATA,
+                  "rx skb %p len %u %s%s%s%s%s %srate_idx %u vht_nss %u freq %u band %u flag 0x%x fcs-err %imic-err %i\n",
+                  skb,
+                  skb->len,
+                  status->flag == 0 ? "legacy" : "",
+                  status->flag & RX_FLAG_HT ? "ht" : "",
+                  status->flag & RX_FLAG_VHT ? "vht" : "",
+                  status->flag & RX_FLAG_40MHZ ? "40" : "",
+                  status->vht_flag & RX_VHT_FLAG_80MHZ ? "80" : "",
+                  status->flag & RX_FLAG_SHORT_GI ? "sgi " : "",
+                  status->rate_idx,
+                  status->vht_nss,
+                  status->freq,
+                  status->band, status->flag,
+                  !!(status->flag & RX_FLAG_FAILED_FCS_CRC),
+                  !!(status->flag & RX_FLAG_MMIC_ERROR));
+       ath10k_dbg_dump(ATH10K_DBG_HTT_DUMP, NULL, "rx skb: ",
+                       skb->data, skb->len);
+
+       ieee80211_rx(ar->hw, skb);
+}
+
 static int ath10k_htt_rx_nwifi_hdrlen(struct ieee80211_hdr *hdr)
 {
        /* nwifi header is padded to 4 bytes. this fixes 4addr rx */
@@ -643,11 +831,12 @@ static int ath10k_htt_rx_nwifi_hdrlen(struct ieee80211_hdr *hdr)
 }
 
 static void ath10k_htt_rx_amsdu(struct ath10k_htt *htt,
-                               struct htt_rx_info *info)
+                               struct ieee80211_rx_status *rx_status,
+                               struct sk_buff *skb_in)
 {
        struct htt_rx_desc *rxd;
+       struct sk_buff *skb = skb_in;
        struct sk_buff *first;
-       struct sk_buff *skb = info->skb;
        enum rx_msdu_decap_format fmt;
        enum htt_rx_mpdu_encrypt_type enctype;
        struct ieee80211_hdr *hdr;
@@ -728,24 +917,27 @@ static void ath10k_htt_rx_amsdu(struct ath10k_htt *htt,
                        break;
                }
 
-               info->skb = skb;
-               info->encrypt_type = enctype;
+               skb_in = skb;
+               ath10k_htt_rx_h_protected(htt, rx_status, skb_in, enctype);
                skb = skb->next;
-               info->skb->next = NULL;
+               skb_in->next = NULL;
 
                if (skb)
-                       info->amsdu_more = true;
+                       rx_status->flag |= RX_FLAG_AMSDU_MORE;
+               else
+                       rx_status->flag &= ~RX_FLAG_AMSDU_MORE;
 
-               ath10k_process_rx(htt->ar, info);
+               ath10k_process_rx(htt->ar, rx_status, skb_in);
        }
 
        /* FIXME: It might be nice to re-assemble the A-MSDU when there's a
         * monitor interface active for sniffing purposes. */
 }
 
-static void ath10k_htt_rx_msdu(struct ath10k_htt *htt, struct htt_rx_info *info)
+static void ath10k_htt_rx_msdu(struct ath10k_htt *htt,
+                              struct ieee80211_rx_status *rx_status,
+                              struct sk_buff *skb)
 {
-       struct sk_buff *skb = info->skb;
        struct htt_rx_desc *rxd;
        struct ieee80211_hdr *hdr;
        enum rx_msdu_decap_format fmt;
@@ -808,66 +1000,9 @@ static void ath10k_htt_rx_msdu(struct ath10k_htt *htt, struct htt_rx_info *info)
                break;
        }
 
-       info->skb = skb;
-       info->encrypt_type = enctype;
-
-       ath10k_process_rx(htt->ar, info);
-}
-
-static bool ath10k_htt_rx_has_decrypt_err(struct sk_buff *skb)
-{
-       struct htt_rx_desc *rxd;
-       u32 flags;
-
-       rxd = (void *)skb->data - sizeof(*rxd);
-       flags = __le32_to_cpu(rxd->attention.flags);
-
-       if (flags & RX_ATTENTION_FLAGS_DECRYPT_ERR)
-               return true;
-
-       return false;
-}
-
-static bool ath10k_htt_rx_has_fcs_err(struct sk_buff *skb)
-{
-       struct htt_rx_desc *rxd;
-       u32 flags;
-
-       rxd = (void *)skb->data - sizeof(*rxd);
-       flags = __le32_to_cpu(rxd->attention.flags);
-
-       if (flags & RX_ATTENTION_FLAGS_FCS_ERR)
-               return true;
-
-       return false;
-}
-
-static bool ath10k_htt_rx_has_mic_err(struct sk_buff *skb)
-{
-       struct htt_rx_desc *rxd;
-       u32 flags;
-
-       rxd = (void *)skb->data - sizeof(*rxd);
-       flags = __le32_to_cpu(rxd->attention.flags);
-
-       if (flags & RX_ATTENTION_FLAGS_TKIP_MIC_ERR)
-               return true;
-
-       return false;
-}
-
-static bool ath10k_htt_rx_is_mgmt(struct sk_buff *skb)
-{
-       struct htt_rx_desc *rxd;
-       u32 flags;
-
-       rxd = (void *)skb->data - sizeof(*rxd);
-       flags = __le32_to_cpu(rxd->attention.flags);
-
-       if (flags & RX_ATTENTION_FLAGS_MGMT_TYPE)
-               return true;
+       ath10k_htt_rx_h_protected(htt, rx_status, skb, enctype);
 
-       return false;
+       ath10k_process_rx(htt->ar, rx_status, skb);
 }
 
 static int ath10k_htt_rx_get_csum_state(struct sk_buff *skb)
@@ -952,21 +1087,73 @@ static int ath10k_unchain_msdu(struct sk_buff *msdu_head)
        return 0;
 }
 
+static bool ath10k_htt_rx_amsdu_allowed(struct ath10k_htt *htt,
+                                       struct sk_buff *head,
+                                       enum htt_rx_mpdu_status status,
+                                       bool channel_set,
+                                       u32 attention)
+{
+       if (head->len == 0) {
+               ath10k_dbg(ATH10K_DBG_HTT,
+                          "htt rx dropping due to zero-len\n");
+               return false;
+       }
+
+       if (attention & RX_ATTENTION_FLAGS_DECRYPT_ERR) {
+               ath10k_dbg(ATH10K_DBG_HTT,
+                          "htt rx dropping due to decrypt-err\n");
+               return false;
+       }
+
+       if (!channel_set) {
+               ath10k_warn("no channel configured; ignoring frame!\n");
+               return false;
+       }
+
+       /* Skip mgmt frames while we handle this in WMI */
+       if (status == HTT_RX_IND_MPDU_STATUS_MGMT_CTRL ||
+           attention & RX_ATTENTION_FLAGS_MGMT_TYPE) {
+               ath10k_dbg(ATH10K_DBG_HTT, "htt rx mgmt ctrl\n");
+               return false;
+       }
+
+       if (status != HTT_RX_IND_MPDU_STATUS_OK &&
+           status != HTT_RX_IND_MPDU_STATUS_TKIP_MIC_ERR &&
+           status != HTT_RX_IND_MPDU_STATUS_ERR_INV_PEER &&
+           !htt->ar->monitor_started) {
+               ath10k_dbg(ATH10K_DBG_HTT,
+                          "htt rx ignoring frame w/ status %d\n",
+                          status);
+               return false;
+       }
+
+       if (test_bit(ATH10K_CAC_RUNNING, &htt->ar->dev_flags)) {
+               ath10k_dbg(ATH10K_DBG_HTT,
+                          "htt rx CAC running\n");
+               return false;
+       }
+
+       return true;
+}
+
 static void ath10k_htt_rx_handler(struct ath10k_htt *htt,
                                  struct htt_rx_indication *rx)
 {
-       struct htt_rx_info info;
+       struct ieee80211_rx_status *rx_status = &htt->rx_status;
        struct htt_rx_indication_mpdu_range *mpdu_ranges;
+       struct htt_rx_desc *rxd;
+       enum htt_rx_mpdu_status status;
        struct ieee80211_hdr *hdr;
        int num_mpdu_ranges;
+       u32 attention;
        int fw_desc_len;
        u8 *fw_desc;
+       bool channel_set;
        int i, j;
+       int ret;
 
        lockdep_assert_held(&htt->rx_ring.lock);
 
-       memset(&info, 0, sizeof(info));
-
        fw_desc_len = __le16_to_cpu(rx->prefix.fw_rx_desc_bytes);
        fw_desc = (u8 *)&rx->fw_desc;
 
@@ -974,106 +1161,90 @@ static void ath10k_htt_rx_handler(struct ath10k_htt *htt,
                             HTT_RX_INDICATION_INFO1_NUM_MPDU_RANGES);
        mpdu_ranges = htt_rx_ind_get_mpdu_ranges(rx);
 
+       /* Fill this once, while this is per-ppdu */
+       if (rx->ppdu.info0 & HTT_RX_INDICATION_INFO0_START_VALID) {
+               memset(rx_status, 0, sizeof(*rx_status));
+               rx_status->signal  = ATH10K_DEFAULT_NOISE_FLOOR +
+                                    rx->ppdu.combined_rssi;
+       }
+
+       if (rx->ppdu.info0 & HTT_RX_INDICATION_INFO0_END_VALID) {
+               /* TSF available only in 32-bit */
+               rx_status->mactime = __le32_to_cpu(rx->ppdu.tsf) & 0xffffffff;
+               rx_status->flag |= RX_FLAG_MACTIME_END;
+       }
+
+       channel_set = ath10k_htt_rx_h_channel(htt->ar, rx_status);
+
+       if (channel_set) {
+               ath10k_htt_rx_h_rates(htt->ar, rx_status->band,
+                                     rx->ppdu.info0,
+                                     __le32_to_cpu(rx->ppdu.info1),
+                                     __le32_to_cpu(rx->ppdu.info2),
+                                     rx_status);
+       }
+
        ath10k_dbg_dump(ATH10K_DBG_HTT_DUMP, NULL, "htt rx ind: ",
                        rx, sizeof(*rx) +
                        (sizeof(struct htt_rx_indication_mpdu_range) *
                                num_mpdu_ranges));
 
        for (i = 0; i < num_mpdu_ranges; i++) {
-               info.status = mpdu_ranges[i].mpdu_range_status;
+               status = mpdu_ranges[i].mpdu_range_status;
 
                for (j = 0; j < mpdu_ranges[i].mpdu_count; j++) {
                        struct sk_buff *msdu_head, *msdu_tail;
-                       enum htt_rx_mpdu_status status;
-                       int msdu_chaining;
 
                        msdu_head = NULL;
                        msdu_tail = NULL;
-                       msdu_chaining = ath10k_htt_rx_amsdu_pop(htt,
-                                                        &fw_desc,
-                                                        &fw_desc_len,
-                                                        &msdu_head,
-                                                        &msdu_tail);
-
-                       if (!msdu_head) {
-                               ath10k_warn("htt rx no data!\n");
-                               continue;
-                       }
-
-                       if (msdu_head->len == 0) {
-                               ath10k_dbg(ATH10K_DBG_HTT,
-                                          "htt rx dropping due to zero-len\n");
-                               ath10k_htt_rx_free_msdu_chain(msdu_head);
-                               continue;
-                       }
-
-                       if (ath10k_htt_rx_has_decrypt_err(msdu_head)) {
-                               ath10k_dbg(ATH10K_DBG_HTT,
-                                          "htt rx dropping due to decrypt-err\n");
+                       ret = ath10k_htt_rx_amsdu_pop(htt,
+                                                     &fw_desc,
+                                                     &fw_desc_len,
+                                                     &msdu_head,
+                                                     &msdu_tail);
+
+                       if (ret < 0) {
+                               ath10k_warn("failed to pop amsdu from htt rx ring %d\n",
+                                           ret);
                                ath10k_htt_rx_free_msdu_chain(msdu_head);
                                continue;
                        }
 
-                       status = info.status;
-
-                       /* Skip mgmt frames while we handle this in WMI */
-                       if (status == HTT_RX_IND_MPDU_STATUS_MGMT_CTRL ||
-                           ath10k_htt_rx_is_mgmt(msdu_head)) {
-                               ath10k_dbg(ATH10K_DBG_HTT, "htt rx mgmt ctrl\n");
-                               ath10k_htt_rx_free_msdu_chain(msdu_head);
-                               continue;
-                       }
+                       rxd = container_of((void *)msdu_head->data,
+                                          struct htt_rx_desc,
+                                          msdu_payload);
+                       attention = __le32_to_cpu(rxd->attention.flags);
 
-                       if (status != HTT_RX_IND_MPDU_STATUS_OK &&
-                           status != HTT_RX_IND_MPDU_STATUS_TKIP_MIC_ERR &&
-                           status != HTT_RX_IND_MPDU_STATUS_ERR_INV_PEER &&
-                           !htt->ar->monitor_enabled) {
-                               ath10k_dbg(ATH10K_DBG_HTT,
-                                          "htt rx ignoring frame w/ status %d\n",
-                                          status);
+                       if (!ath10k_htt_rx_amsdu_allowed(htt, msdu_head,
+                                                        status,
+                                                        channel_set,
+                                                        attention)) {
                                ath10k_htt_rx_free_msdu_chain(msdu_head);
                                continue;
                        }
 
-                       if (test_bit(ATH10K_CAC_RUNNING, &htt->ar->dev_flags)) {
-                               ath10k_dbg(ATH10K_DBG_HTT,
-                                          "htt rx CAC running\n");
+                       if (ret > 0 &&
+                           ath10k_unchain_msdu(msdu_head) < 0) {
                                ath10k_htt_rx_free_msdu_chain(msdu_head);
                                continue;
                        }
 
-                       if (msdu_chaining &&
-                           (ath10k_unchain_msdu(msdu_head) < 0)) {
-                               ath10k_htt_rx_free_msdu_chain(msdu_head);
-                               continue;
-                       }
-
-                       info.skb     = msdu_head;
-                       info.fcs_err = ath10k_htt_rx_has_fcs_err(msdu_head);
-                       info.mic_err = ath10k_htt_rx_has_mic_err(msdu_head);
-
-                       if (info.fcs_err)
-                               ath10k_dbg(ATH10K_DBG_HTT,
-                                          "htt rx has FCS err\n");
-
-                       if (info.mic_err)
-                               ath10k_dbg(ATH10K_DBG_HTT,
-                                          "htt rx has MIC err\n");
-
-                       info.signal  = ATH10K_DEFAULT_NOISE_FLOOR;
-                       info.signal += rx->ppdu.combined_rssi;
+                       if (attention & RX_ATTENTION_FLAGS_FCS_ERR)
+                               rx_status->flag |= RX_FLAG_FAILED_FCS_CRC;
+                       else
+                               rx_status->flag &= ~RX_FLAG_FAILED_FCS_CRC;
 
-                       info.rate.info0 = rx->ppdu.info0;
-                       info.rate.info1 = __le32_to_cpu(rx->ppdu.info1);
-                       info.rate.info2 = __le32_to_cpu(rx->ppdu.info2);
-                       info.tsf = __le32_to_cpu(rx->ppdu.tsf);
+                       if (attention & RX_ATTENTION_FLAGS_TKIP_MIC_ERR)
+                               rx_status->flag |= RX_FLAG_MMIC_ERROR;
+                       else
+                               rx_status->flag &= ~RX_FLAG_MMIC_ERROR;
 
                        hdr = ath10k_htt_rx_skb_get_hdr(msdu_head);
 
                        if (ath10k_htt_rx_hdr_is_amsdu(hdr))
-                               ath10k_htt_rx_amsdu(htt, &info);
+                               ath10k_htt_rx_amsdu(htt, rx_status, msdu_head);
                        else
-                               ath10k_htt_rx_msdu(htt, &info);
+                               ath10k_htt_rx_msdu(htt, rx_status, msdu_head);
                }
        }
 
@@ -1084,11 +1255,12 @@ static void ath10k_htt_rx_frag_handler(struct ath10k_htt *htt,
                                struct htt_rx_fragment_indication *frag)
 {
        struct sk_buff *msdu_head, *msdu_tail;
+       enum htt_rx_mpdu_encrypt_type enctype;
        struct htt_rx_desc *rxd;
        enum rx_msdu_decap_format fmt;
-       struct htt_rx_info info = {};
+       struct ieee80211_rx_status *rx_status = &htt->rx_status;
        struct ieee80211_hdr *hdr;
-       int msdu_chaining;
+       int ret;
        bool tkip_mic_err;
        bool decrypt_err;
        u8 *fw_desc;
@@ -1102,19 +1274,15 @@ static void ath10k_htt_rx_frag_handler(struct ath10k_htt *htt,
        msdu_tail = NULL;
 
        spin_lock_bh(&htt->rx_ring.lock);
-       msdu_chaining = ath10k_htt_rx_amsdu_pop(htt, &fw_desc, &fw_desc_len,
-                                               &msdu_head, &msdu_tail);
+       ret = ath10k_htt_rx_amsdu_pop(htt, &fw_desc, &fw_desc_len,
+                                     &msdu_head, &msdu_tail);
        spin_unlock_bh(&htt->rx_ring.lock);
 
        ath10k_dbg(ATH10K_DBG_HTT_DUMP, "htt rx frag ahead\n");
 
-       if (!msdu_head) {
-               ath10k_warn("htt rx frag no data\n");
-               return;
-       }
-
-       if (msdu_chaining || msdu_head != msdu_tail) {
-               ath10k_warn("aggregation with fragmentation?!\n");
+       if (ret) {
+               ath10k_warn("failed to pop amsdu from httr rx ring for fragmented rx %d\n",
+                           ret);
                ath10k_htt_rx_free_msdu_chain(msdu_head);
                return;
        }
@@ -1136,57 +1304,54 @@ static void ath10k_htt_rx_frag_handler(struct ath10k_htt *htt,
                goto end;
        }
 
-       info.skb = msdu_head;
-       info.status = HTT_RX_IND_MPDU_STATUS_OK;
-       info.encrypt_type = MS(__le32_to_cpu(rxd->mpdu_start.info0),
-                               RX_MPDU_START_INFO0_ENCRYPT_TYPE);
-       info.skb->ip_summed = ath10k_htt_rx_get_csum_state(info.skb);
+       enctype = MS(__le32_to_cpu(rxd->mpdu_start.info0),
+                    RX_MPDU_START_INFO0_ENCRYPT_TYPE);
+       ath10k_htt_rx_h_protected(htt, rx_status, msdu_head, enctype);
+       msdu_head->ip_summed = ath10k_htt_rx_get_csum_state(msdu_head);
 
-       if (tkip_mic_err) {
+       if (tkip_mic_err)
                ath10k_warn("tkip mic error\n");
-               info.status = HTT_RX_IND_MPDU_STATUS_TKIP_MIC_ERR;
-       }
 
        if (decrypt_err) {
                ath10k_warn("decryption err in fragmented rx\n");
-               dev_kfree_skb_any(info.skb);
+               dev_kfree_skb_any(msdu_head);
                goto end;
        }
 
-       if (info.encrypt_type != HTT_RX_MPDU_ENCRYPT_NONE) {
+       if (enctype != HTT_RX_MPDU_ENCRYPT_NONE) {
                hdrlen = ieee80211_hdrlen(hdr->frame_control);
-               paramlen = ath10k_htt_rx_crypto_param_len(info.encrypt_type);
+               paramlen = ath10k_htt_rx_crypto_param_len(enctype);
 
                /* It is more efficient to move the header than the payload */
-               memmove((void *)info.skb->data + paramlen,
-                       (void *)info.skb->data,
+               memmove((void *)msdu_head->data + paramlen,
+                       (void *)msdu_head->data,
                        hdrlen);
-               skb_pull(info.skb, paramlen);
-               hdr = (struct ieee80211_hdr *)info.skb->data;
+               skb_pull(msdu_head, paramlen);
+               hdr = (struct ieee80211_hdr *)msdu_head->data;
        }
 
        /* remove trailing FCS */
        trim  = 4;
 
        /* remove crypto trailer */
-       trim += ath10k_htt_rx_crypto_tail_len(info.encrypt_type);
+       trim += ath10k_htt_rx_crypto_tail_len(enctype);
 
        /* last fragment of TKIP frags has MIC */
        if (!ieee80211_has_morefrags(hdr->frame_control) &&
-           info.encrypt_type == HTT_RX_MPDU_ENCRYPT_TKIP_WPA)
+           enctype == HTT_RX_MPDU_ENCRYPT_TKIP_WPA)
                trim += 8;
 
-       if (trim > info.skb->len) {
+       if (trim > msdu_head->len) {
                ath10k_warn("htt rx fragment: trailer longer than the frame itself? drop\n");
-               dev_kfree_skb_any(info.skb);
+               dev_kfree_skb_any(msdu_head);
                goto end;
        }
 
-       skb_trim(info.skb, info.skb->len - trim);
+       skb_trim(msdu_head, msdu_head->len - trim);
 
        ath10k_dbg_dump(ATH10K_DBG_HTT_DUMP, NULL, "htt rx frag mpdu: ",
-                       info.skb->data, info.skb->len);
-       ath10k_process_rx(htt->ar, &info);
+                       msdu_head->data, msdu_head->len);
+       ath10k_process_rx(htt->ar, rx_status, msdu_head);
 
 end:
        if (fw_desc_len > 0) {
index 35fc44e281f57968171283d7d336cce5b20eddac..007e855f4ba99f9067725a11b85fdeadb3412483 100644 (file)
@@ -28,6 +28,7 @@
 #define QCA988X_HW_2_0_CHIP_ID_REV     0x2
 #define QCA988X_HW_2_0_FW_DIR          "ath10k/QCA988X/hw2.0"
 #define QCA988X_HW_2_0_FW_FILE         "firmware.bin"
+#define QCA988X_HW_2_0_FW_2_FILE       "firmware-2.bin"
 #define QCA988X_HW_2_0_OTP_FILE                "otp.bin"
 #define QCA988X_HW_2_0_BOARD_DATA_FILE "board.bin"
 #define QCA988X_HW_2_0_PATCH_LOAD_ADDR 0x1234
index 511a2f81e7afc190419623235cdbefe9a66e4039..0ac5437492fd5c100b95f7f576491d2d6d5d0025 100644 (file)
@@ -165,7 +165,7 @@ static int ath10k_clear_peer_keys(struct ath10k_vif *arvif,
                        first_errno = ret;
 
                if (ret)
-                       ath10k_warn("could not remove peer wep key %d (%d)\n",
+                       ath10k_warn("failed to remove peer wep key %d: %d\n",
                                    i, ret);
 
                peer->keys[i] = NULL;
@@ -213,7 +213,8 @@ static int ath10k_clear_vdev_key(struct ath10k_vif *arvif,
                        first_errno = ret;
 
                if (ret)
-                       ath10k_warn("could not remove key for %pM\n", addr);
+                       ath10k_warn("failed to remove key for %pM: %d\n",
+                                   addr, ret);
        }
 
        return first_errno;
@@ -323,14 +324,14 @@ static int ath10k_peer_create(struct ath10k *ar, u32 vdev_id, const u8 *addr)
 
        ret = ath10k_wmi_peer_create(ar, vdev_id, addr);
        if (ret) {
-               ath10k_warn("Failed to create wmi peer %pM on vdev %i: %i\n",
+               ath10k_warn("failed to create wmi peer %pM on vdev %i: %i\n",
                            addr, vdev_id, ret);
                return ret;
        }
 
        ret = ath10k_wait_for_peer_created(ar, vdev_id, addr);
        if (ret) {
-               ath10k_warn("Failed to wait for created wmi peer %pM on vdev %i: %i\n",
+               ath10k_warn("failed to wait for created wmi peer %pM on vdev %i: %i\n",
                            addr, vdev_id, ret);
                return ret;
        }
@@ -351,7 +352,7 @@ static int ath10k_mac_set_kickout(struct ath10k_vif *arvif)
        ret = ath10k_wmi_pdev_set_param(ar, param,
                                        ATH10K_KICKOUT_THRESHOLD);
        if (ret) {
-               ath10k_warn("Failed to set kickout threshold on vdev %i: %d\n",
+               ath10k_warn("failed to set kickout threshold on vdev %i: %d\n",
                            arvif->vdev_id, ret);
                return ret;
        }
@@ -360,7 +361,7 @@ static int ath10k_mac_set_kickout(struct ath10k_vif *arvif)
        ret = ath10k_wmi_vdev_set_param(ar, arvif->vdev_id, param,
                                        ATH10K_KEEPALIVE_MIN_IDLE);
        if (ret) {
-               ath10k_warn("Failed to set keepalive minimum idle time on vdev %i : %d\n",
+               ath10k_warn("failed to set keepalive minimum idle time on vdev %i: %d\n",
                            arvif->vdev_id, ret);
                return ret;
        }
@@ -369,7 +370,7 @@ static int ath10k_mac_set_kickout(struct ath10k_vif *arvif)
        ret = ath10k_wmi_vdev_set_param(ar, arvif->vdev_id, param,
                                        ATH10K_KEEPALIVE_MAX_IDLE);
        if (ret) {
-               ath10k_warn("Failed to set keepalive maximum idle time on vdev %i: %d\n",
+               ath10k_warn("failed to set keepalive maximum idle time on vdev %i: %d\n",
                            arvif->vdev_id, ret);
                return ret;
        }
@@ -378,7 +379,7 @@ static int ath10k_mac_set_kickout(struct ath10k_vif *arvif)
        ret = ath10k_wmi_vdev_set_param(ar, arvif->vdev_id, param,
                                        ATH10K_KEEPALIVE_MAX_UNRESPONSIVE);
        if (ret) {
-               ath10k_warn("Failed to set keepalive maximum unresponsive time on vdev %i: %d\n",
+               ath10k_warn("failed to set keepalive maximum unresponsive time on vdev %i: %d\n",
                            arvif->vdev_id, ret);
                return ret;
        }
@@ -488,92 +489,20 @@ static inline int ath10k_vdev_setup_sync(struct ath10k *ar)
        return 0;
 }
 
-static int ath10k_vdev_start(struct ath10k_vif *arvif)
+static bool ath10k_monitor_is_enabled(struct ath10k *ar)
 {
-       struct ath10k *ar = arvif->ar;
-       struct cfg80211_chan_def *chandef = &ar->chandef;
-       struct wmi_vdev_start_request_arg arg = {};
-       int ret = 0;
-
        lockdep_assert_held(&ar->conf_mutex);
 
-       reinit_completion(&ar->vdev_setup_done);
-
-       arg.vdev_id = arvif->vdev_id;
-       arg.dtim_period = arvif->dtim_period;
-       arg.bcn_intval = arvif->beacon_interval;
-
-       arg.channel.freq = chandef->chan->center_freq;
-       arg.channel.band_center_freq1 = chandef->center_freq1;
-       arg.channel.mode = chan_to_phymode(chandef);
-
-       arg.channel.min_power = 0;
-       arg.channel.max_power = chandef->chan->max_power * 2;
-       arg.channel.max_reg_power = chandef->chan->max_reg_power * 2;
-       arg.channel.max_antenna_gain = chandef->chan->max_antenna_gain * 2;
-
-       if (arvif->vdev_type == WMI_VDEV_TYPE_AP) {
-               arg.ssid = arvif->u.ap.ssid;
-               arg.ssid_len = arvif->u.ap.ssid_len;
-               arg.hidden_ssid = arvif->u.ap.hidden_ssid;
-
-               /* For now allow DFS for AP mode */
-               arg.channel.chan_radar =
-                       !!(chandef->chan->flags & IEEE80211_CHAN_RADAR);
-       } else if (arvif->vdev_type == WMI_VDEV_TYPE_IBSS) {
-               arg.ssid = arvif->vif->bss_conf.ssid;
-               arg.ssid_len = arvif->vif->bss_conf.ssid_len;
-       }
-
        ath10k_dbg(ATH10K_DBG_MAC,
-                  "mac vdev %d start center_freq %d phymode %s\n",
-                  arg.vdev_id, arg.channel.freq,
-                  ath10k_wmi_phymode_str(arg.channel.mode));
-
-       ret = ath10k_wmi_vdev_start(ar, &arg);
-       if (ret) {
-               ath10k_warn("WMI vdev %i start failed: ret %d\n",
-                           arg.vdev_id, ret);
-               return ret;
-       }
-
-       ret = ath10k_vdev_setup_sync(ar);
-       if (ret) {
-               ath10k_warn("vdev %i setup failed %d\n",
-                           arg.vdev_id, ret);
-               return ret;
-       }
-
-       return ret;
-}
-
-static int ath10k_vdev_stop(struct ath10k_vif *arvif)
-{
-       struct ath10k *ar = arvif->ar;
-       int ret;
+                  "mac monitor refs: promisc %d monitor %d cac %d\n",
+                  ar->promisc, ar->monitor,
+                  test_bit(ATH10K_CAC_RUNNING, &ar->dev_flags));
 
-       lockdep_assert_held(&ar->conf_mutex);
-
-       reinit_completion(&ar->vdev_setup_done);
-
-       ret = ath10k_wmi_vdev_stop(ar, arvif->vdev_id);
-       if (ret) {
-               ath10k_warn("WMI vdev %i stop failed: ret %d\n",
-                           arvif->vdev_id, ret);
-               return ret;
-       }
-
-       ret = ath10k_vdev_setup_sync(ar);
-       if (ret) {
-               ath10k_warn("vdev %i setup sync failed %d\n",
-                           arvif->vdev_id, ret);
-               return ret;
-       }
-
-       return ret;
+       return ar->promisc || ar->monitor ||
+              test_bit(ATH10K_CAC_RUNNING, &ar->dev_flags);
 }
 
-static int ath10k_monitor_start(struct ath10k *ar, int vdev_id)
+static int ath10k_monitor_vdev_start(struct ath10k *ar, int vdev_id)
 {
        struct cfg80211_chan_def *chandef = &ar->chandef;
        struct ieee80211_channel *channel = chandef->chan;
@@ -582,11 +511,6 @@ static int ath10k_monitor_start(struct ath10k *ar, int vdev_id)
 
        lockdep_assert_held(&ar->conf_mutex);
 
-       if (!ar->monitor_present) {
-               ath10k_warn("mac montor stop -- monitor is not present\n");
-               return -EINVAL;
-       }
-
        arg.vdev_id = vdev_id;
        arg.channel.freq = channel->center_freq;
        arg.channel.band_center_freq1 = chandef->center_freq1;
@@ -604,88 +528,75 @@ static int ath10k_monitor_start(struct ath10k *ar, int vdev_id)
 
        ret = ath10k_wmi_vdev_start(ar, &arg);
        if (ret) {
-               ath10k_warn("Monitor vdev %i start failed: ret %d\n",
+               ath10k_warn("failed to request monitor vdev %i start: %d\n",
                            vdev_id, ret);
                return ret;
        }
 
        ret = ath10k_vdev_setup_sync(ar);
        if (ret) {
-               ath10k_warn("Monitor vdev %i setup failed %d\n",
+               ath10k_warn("failed to synchronize setup for monitor vdev %i: %d\n",
                            vdev_id, ret);
                return ret;
        }
 
        ret = ath10k_wmi_vdev_up(ar, vdev_id, 0, ar->mac_addr);
        if (ret) {
-               ath10k_warn("Monitor vdev %i up failed: %d\n",
+               ath10k_warn("failed to put up monitor vdev %i: %d\n",
                            vdev_id, ret);
                goto vdev_stop;
        }
 
        ar->monitor_vdev_id = vdev_id;
-       ar->monitor_enabled = true;
 
+       ath10k_dbg(ATH10K_DBG_MAC, "mac monitor vdev %i started\n",
+                  ar->monitor_vdev_id);
        return 0;
 
 vdev_stop:
        ret = ath10k_wmi_vdev_stop(ar, ar->monitor_vdev_id);
        if (ret)
-               ath10k_warn("Monitor vdev %i stop failed: %d\n",
+               ath10k_warn("failed to stop monitor vdev %i after start failure: %d\n",
                            ar->monitor_vdev_id, ret);
 
        return ret;
 }
 
-static int ath10k_monitor_stop(struct ath10k *ar)
+static int ath10k_monitor_vdev_stop(struct ath10k *ar)
 {
        int ret = 0;
 
        lockdep_assert_held(&ar->conf_mutex);
 
-       if (!ar->monitor_present) {
-               ath10k_warn("mac montor stop -- monitor is not present\n");
-               return -EINVAL;
-       }
-
-       if (!ar->monitor_enabled) {
-               ath10k_warn("mac montor stop -- monitor is not enabled\n");
-               return -EINVAL;
-       }
-
        ret = ath10k_wmi_vdev_down(ar, ar->monitor_vdev_id);
        if (ret)
-               ath10k_warn("Monitor vdev %i down failed: %d\n",
+               ath10k_warn("failed to put down monitor vdev %i: %d\n",
                            ar->monitor_vdev_id, ret);
 
        ret = ath10k_wmi_vdev_stop(ar, ar->monitor_vdev_id);
        if (ret)
-               ath10k_warn("Monitor vdev %i stop failed: %d\n",
+               ath10k_warn("failed to request monitor vdev %i stop: %d\n",
                            ar->monitor_vdev_id, ret);
 
        ret = ath10k_vdev_setup_sync(ar);
        if (ret)
-               ath10k_warn("Monitor_down sync failed, vdev %i: %d\n",
+               ath10k_warn("failed to synchronise monitor vdev %i: %d\n",
                            ar->monitor_vdev_id, ret);
 
-       ar->monitor_enabled = false;
+       ath10k_dbg(ATH10K_DBG_MAC, "mac monitor vdev %i stopped\n",
+                  ar->monitor_vdev_id);
        return ret;
 }
 
-static int ath10k_monitor_create(struct ath10k *ar)
+static int ath10k_monitor_vdev_create(struct ath10k *ar)
 {
        int bit, ret = 0;
 
        lockdep_assert_held(&ar->conf_mutex);
 
-       if (ar->monitor_present) {
-               ath10k_warn("Monitor mode already enabled\n");
-               return 0;
-       }
-
        bit = ffs(ar->free_vdev_map);
        if (bit == 0) {
-               ath10k_warn("No free VDEV slots\n");
+               ath10k_warn("failed to find free vdev id for monitor vdev\n");
                return -ENOMEM;
        }
 
@@ -696,7 +607,7 @@ static int ath10k_monitor_create(struct ath10k *ar)
                                     WMI_VDEV_TYPE_MONITOR,
                                     0, ar->mac_addr);
        if (ret) {
-               ath10k_warn("WMI vdev %i monitor create failed: ret %d\n",
+               ath10k_warn("failed to request monitor vdev %i creation: %d\n",
                            ar->monitor_vdev_id, ret);
                goto vdev_fail;
        }
@@ -704,7 +615,6 @@ static int ath10k_monitor_create(struct ath10k *ar)
        ath10k_dbg(ATH10K_DBG_MAC, "mac monitor vdev %d created\n",
                   ar->monitor_vdev_id);
 
-       ar->monitor_present = true;
        return 0;
 
 vdev_fail:
@@ -715,48 +625,123 @@ vdev_fail:
        return ret;
 }
 
-static int ath10k_monitor_destroy(struct ath10k *ar)
+static int ath10k_monitor_vdev_delete(struct ath10k *ar)
 {
        int ret = 0;
 
        lockdep_assert_held(&ar->conf_mutex);
 
-       if (!ar->monitor_present)
-               return 0;
-
        ret = ath10k_wmi_vdev_delete(ar, ar->monitor_vdev_id);
        if (ret) {
-               ath10k_warn("WMI vdev %i monitor delete failed: %d\n",
+               ath10k_warn("failed to request wmi monitor vdev %i removal: %d\n",
                            ar->monitor_vdev_id, ret);
                return ret;
        }
 
        ar->free_vdev_map |= 1 << (ar->monitor_vdev_id);
-       ar->monitor_present = false;
 
        ath10k_dbg(ATH10K_DBG_MAC, "mac monitor vdev %d deleted\n",
                   ar->monitor_vdev_id);
        return ret;
 }
 
-static int ath10k_start_cac(struct ath10k *ar)
+static int ath10k_monitor_start(struct ath10k *ar)
 {
        int ret;
 
        lockdep_assert_held(&ar->conf_mutex);
 
-       set_bit(ATH10K_CAC_RUNNING, &ar->dev_flags);
+       if (!ath10k_monitor_is_enabled(ar)) {
+               ath10k_warn("trying to start monitor with no references\n");
+               return 0;
+       }
+
+       if (ar->monitor_started) {
+               ath10k_dbg(ATH10K_DBG_MAC, "mac monitor already started\n");
+               return 0;
+       }
 
-       ret = ath10k_monitor_create(ar);
+       ret = ath10k_monitor_vdev_create(ar);
        if (ret) {
-               clear_bit(ATH10K_CAC_RUNNING, &ar->dev_flags);
+               ath10k_warn("failed to create monitor vdev: %d\n", ret);
                return ret;
        }
 
-       ret = ath10k_monitor_start(ar, ar->monitor_vdev_id);
+       ret = ath10k_monitor_vdev_start(ar, ar->monitor_vdev_id);
        if (ret) {
+               ath10k_warn("failed to start monitor vdev: %d\n", ret);
+               ath10k_monitor_vdev_delete(ar);
+               return ret;
+       }
+
+       ar->monitor_started = true;
+       ath10k_dbg(ATH10K_DBG_MAC, "mac monitor started\n");
+
+       return 0;
+}
+
+static void ath10k_monitor_stop(struct ath10k *ar)
+{
+       int ret;
+
+       lockdep_assert_held(&ar->conf_mutex);
+
+       if (ath10k_monitor_is_enabled(ar)) {
+               ath10k_dbg(ATH10K_DBG_MAC,
+                          "mac monitor will be stopped later\n");
+               return;
+       }
+
+       if (!ar->monitor_started) {
+               ath10k_dbg(ATH10K_DBG_MAC,
+                          "mac monitor probably failed to start earlier\n");
+               return;
+       }
+
+       ret = ath10k_monitor_vdev_stop(ar);
+       if (ret)
+               ath10k_warn("failed to stop monitor vdev: %d\n", ret);
+
+       ret = ath10k_monitor_vdev_delete(ar);
+       if (ret)
+               ath10k_warn("failed to delete monitor vdev: %d\n", ret);
+
+       ar->monitor_started = false;
+       ath10k_dbg(ATH10K_DBG_MAC, "mac monitor stopped\n");
+}
+
+static int ath10k_recalc_rtscts_prot(struct ath10k_vif *arvif)
+{
+       struct ath10k *ar = arvif->ar;
+       u32 vdev_param, rts_cts = 0;
+
+       lockdep_assert_held(&ar->conf_mutex);
+
+       vdev_param = ar->wmi.vdev_param->enable_rtscts;
+
+       if (arvif->use_cts_prot || arvif->num_legacy_stations > 0)
+               rts_cts |= SM(WMI_RTSCTS_ENABLED, WMI_RTSCTS_SET);
+
+       if (arvif->num_legacy_stations > 0)
+               rts_cts |= SM(WMI_RTSCTS_ACROSS_SW_RETRIES,
+                             WMI_RTSCTS_PROFILE);
+
+       return ath10k_wmi_vdev_set_param(ar, arvif->vdev_id, vdev_param,
+                                        rts_cts);
+}
+
+static int ath10k_start_cac(struct ath10k *ar)
+{
+       int ret;
+
+       lockdep_assert_held(&ar->conf_mutex);
+
+       set_bit(ATH10K_CAC_RUNNING, &ar->dev_flags);
+
+       ret = ath10k_monitor_start(ar);
+       if (ret) {
+               ath10k_warn("failed to start monitor (cac): %d\n", ret);
                clear_bit(ATH10K_CAC_RUNNING, &ar->dev_flags);
-               ath10k_monitor_destroy(ar);
                return ret;
        }
 
@@ -774,58 +759,26 @@ static int ath10k_stop_cac(struct ath10k *ar)
        if (!test_bit(ATH10K_CAC_RUNNING, &ar->dev_flags))
                return 0;
 
-       ath10k_monitor_stop(ar);
-       ath10k_monitor_destroy(ar);
        clear_bit(ATH10K_CAC_RUNNING, &ar->dev_flags);
+       ath10k_monitor_stop(ar);
 
        ath10k_dbg(ATH10K_DBG_MAC, "mac cac finished\n");
 
        return 0;
 }
 
-static const char *ath10k_dfs_state(enum nl80211_dfs_state dfs_state)
-{
-       switch (dfs_state) {
-       case NL80211_DFS_USABLE:
-               return "USABLE";
-       case NL80211_DFS_UNAVAILABLE:
-               return "UNAVAILABLE";
-       case NL80211_DFS_AVAILABLE:
-               return "AVAILABLE";
-       default:
-               WARN_ON(1);
-               return "bug";
-       }
-}
-
-static void ath10k_config_radar_detection(struct ath10k *ar)
+static void ath10k_recalc_radar_detection(struct ath10k *ar)
 {
-       struct ieee80211_channel *chan = ar->hw->conf.chandef.chan;
-       bool radar = ar->hw->conf.radar_enabled;
-       bool chan_radar = !!(chan->flags & IEEE80211_CHAN_RADAR);
-       enum nl80211_dfs_state dfs_state = chan->dfs_state;
        int ret;
 
        lockdep_assert_held(&ar->conf_mutex);
 
-       ath10k_dbg(ATH10K_DBG_MAC,
-                  "mac radar config update: chan %dMHz radar %d chan radar %d chan state %s\n",
-                  chan->center_freq, radar, chan_radar,
-                  ath10k_dfs_state(dfs_state));
-
-       /*
-        * It's safe to call it even if CAC is not started.
-        * This call here guarantees changing channel, etc. will stop CAC.
-        */
        ath10k_stop_cac(ar);
 
-       if (!radar)
-               return;
-
-       if (!chan_radar)
+       if (!ar->radar_enabled)
                return;
 
-       if (dfs_state != NL80211_DFS_USABLE)
+       if (ar->num_started_vdevs > 0)
                return;
 
        ret = ath10k_start_cac(ar);
@@ -835,11 +788,106 @@ static void ath10k_config_radar_detection(struct ath10k *ar)
                 * radiation is not allowed, make this channel DFS_UNAVAILABLE
                 * by indicating that radar was detected.
                 */
-               ath10k_warn("failed to start CAC (%d)\n", ret);
+               ath10k_warn("failed to start CAC: %d\n", ret);
                ieee80211_radar_detected(ar->hw);
        }
 }
 
+static int ath10k_vdev_start(struct ath10k_vif *arvif)
+{
+       struct ath10k *ar = arvif->ar;
+       struct cfg80211_chan_def *chandef = &ar->chandef;
+       struct wmi_vdev_start_request_arg arg = {};
+       int ret = 0;
+
+       lockdep_assert_held(&ar->conf_mutex);
+
+       reinit_completion(&ar->vdev_setup_done);
+
+       arg.vdev_id = arvif->vdev_id;
+       arg.dtim_period = arvif->dtim_period;
+       arg.bcn_intval = arvif->beacon_interval;
+
+       arg.channel.freq = chandef->chan->center_freq;
+       arg.channel.band_center_freq1 = chandef->center_freq1;
+       arg.channel.mode = chan_to_phymode(chandef);
+
+       arg.channel.min_power = 0;
+       arg.channel.max_power = chandef->chan->max_power * 2;
+       arg.channel.max_reg_power = chandef->chan->max_reg_power * 2;
+       arg.channel.max_antenna_gain = chandef->chan->max_antenna_gain * 2;
+
+       if (arvif->vdev_type == WMI_VDEV_TYPE_AP) {
+               arg.ssid = arvif->u.ap.ssid;
+               arg.ssid_len = arvif->u.ap.ssid_len;
+               arg.hidden_ssid = arvif->u.ap.hidden_ssid;
+
+               /* For now allow DFS for AP mode */
+               arg.channel.chan_radar =
+                       !!(chandef->chan->flags & IEEE80211_CHAN_RADAR);
+       } else if (arvif->vdev_type == WMI_VDEV_TYPE_IBSS) {
+               arg.ssid = arvif->vif->bss_conf.ssid;
+               arg.ssid_len = arvif->vif->bss_conf.ssid_len;
+       }
+
+       ath10k_dbg(ATH10K_DBG_MAC,
+                  "mac vdev %d start center_freq %d phymode %s\n",
+                  arg.vdev_id, arg.channel.freq,
+                  ath10k_wmi_phymode_str(arg.channel.mode));
+
+       ret = ath10k_wmi_vdev_start(ar, &arg);
+       if (ret) {
+               ath10k_warn("failed to start WMI vdev %i: %d\n",
+                           arg.vdev_id, ret);
+               return ret;
+       }
+
+       ret = ath10k_vdev_setup_sync(ar);
+       if (ret) {
+               ath10k_warn("failed to synchronise setup for vdev %i: %d\n",
+                           arg.vdev_id, ret);
+               return ret;
+       }
+
+       ar->num_started_vdevs++;
+       ath10k_recalc_radar_detection(ar);
+
+       return ret;
+}
+
+static int ath10k_vdev_stop(struct ath10k_vif *arvif)
+{
+       struct ath10k *ar = arvif->ar;
+       int ret;
+
+       lockdep_assert_held(&ar->conf_mutex);
+
+       reinit_completion(&ar->vdev_setup_done);
+
+       ret = ath10k_wmi_vdev_stop(ar, arvif->vdev_id);
+       if (ret) {
+               ath10k_warn("failed to stop WMI vdev %i: %d\n",
+                           arvif->vdev_id, ret);
+               return ret;
+       }
+
+       ret = ath10k_vdev_setup_sync(ar);
+       if (ret) {
+               ath10k_warn("failed to synchronise setup for vdev %i: %d\n",
+                           arvif->vdev_id, ret);
+               return ret;
+       }
+
+       WARN_ON(ar->num_started_vdevs == 0);
+
+       if (ar->num_started_vdevs != 0) {
+               ar->num_started_vdevs--;
+               ath10k_recalc_radar_detection(ar);
+       }
+
+       return ret;
+}
+
 static void ath10k_control_beaconing(struct ath10k_vif *arvif,
                                struct ieee80211_bss_conf *info)
 {
@@ -880,7 +928,7 @@ static void ath10k_control_beaconing(struct ath10k_vif *arvif,
        ret = ath10k_wmi_vdev_up(arvif->ar, arvif->vdev_id, arvif->aid,
                                 arvif->bssid);
        if (ret) {
-               ath10k_warn("Failed to bring up vdev %d: %i\n",
+               ath10k_warn("failed to bring up vdev %d: %i\n",
                            arvif->vdev_id, ret);
                ath10k_vdev_stop(arvif);
                return;
@@ -904,7 +952,7 @@ static void ath10k_control_ibss(struct ath10k_vif *arvif,
        if (!info->ibss_joined) {
                ret = ath10k_peer_delete(arvif->ar, arvif->vdev_id, self_peer);
                if (ret)
-                       ath10k_warn("Failed to delete IBSS self peer:%pM for VDEV:%d ret:%d\n",
+                       ath10k_warn("failed to delete IBSS self peer %pM for vdev %d: %d\n",
                                    self_peer, arvif->vdev_id, ret);
 
                if (is_zero_ether_addr(arvif->bssid))
@@ -913,7 +961,7 @@ static void ath10k_control_ibss(struct ath10k_vif *arvif,
                ret = ath10k_peer_delete(arvif->ar, arvif->vdev_id,
                                         arvif->bssid);
                if (ret) {
-                       ath10k_warn("Failed to delete IBSS BSSID peer:%pM for VDEV:%d ret:%d\n",
+                       ath10k_warn("failed to delete IBSS BSSID peer %pM for vdev %d: %d\n",
                                    arvif->bssid, arvif->vdev_id, ret);
                        return;
                }
@@ -925,7 +973,7 @@ static void ath10k_control_ibss(struct ath10k_vif *arvif,
 
        ret = ath10k_peer_create(arvif->ar, arvif->vdev_id, self_peer);
        if (ret) {
-               ath10k_warn("Failed to create IBSS self peer:%pM for VDEV:%d ret:%d\n",
+               ath10k_warn("failed to create IBSS self peer %pM for vdev %d: %d\n",
                            self_peer, arvif->vdev_id, ret);
                return;
        }
@@ -934,7 +982,7 @@ static void ath10k_control_ibss(struct ath10k_vif *arvif,
        ret = ath10k_wmi_vdev_set_param(arvif->ar, arvif->vdev_id, vdev_param,
                                        ATH10K_DEFAULT_ATIM);
        if (ret)
-               ath10k_warn("Failed to set IBSS ATIM for VDEV:%d ret:%d\n",
+               ath10k_warn("failed to set IBSS ATIM for vdev %d: %d\n",
                            arvif->vdev_id, ret);
 }
 
@@ -961,7 +1009,7 @@ static int ath10k_mac_vif_setup_ps(struct ath10k_vif *arvif)
                ret = ath10k_wmi_set_sta_ps_param(ar, arvif->vdev_id, param,
                                                  conf->dynamic_ps_timeout);
                if (ret) {
-                       ath10k_warn("Failed to set inactivity time for vdev %d: %i\n",
+                       ath10k_warn("failed to set inactivity time for vdev %d: %i\n",
                                    arvif->vdev_id, ret);
                        return ret;
                }
@@ -974,8 +1022,8 @@ static int ath10k_mac_vif_setup_ps(struct ath10k_vif *arvif)
 
        ret = ath10k_wmi_set_psmode(ar, arvif->vdev_id, psmode);
        if (ret) {
-               ath10k_warn("Failed to set PS Mode: %d for VDEV: %d\n",
-                           psmode, arvif->vdev_id);
+               ath10k_warn("failed to set PS Mode %d for vdev %d: %d\n",
+                           psmode, arvif->vdev_id, ret);
                return ret;
        }
 
@@ -1429,7 +1477,7 @@ static void ath10k_bss_assoc(struct ieee80211_hw *hw,
 
        ap_sta = ieee80211_find_sta(vif, bss_conf->bssid);
        if (!ap_sta) {
-               ath10k_warn("Failed to find station entry for %pM, vdev %i\n",
+               ath10k_warn("failed to find station entry for bss %pM vdev %i\n",
                            bss_conf->bssid, arvif->vdev_id);
                rcu_read_unlock();
                return;
@@ -1442,7 +1490,7 @@ static void ath10k_bss_assoc(struct ieee80211_hw *hw,
        ret = ath10k_peer_assoc_prepare(ar, arvif, ap_sta,
                                        bss_conf, &peer_arg);
        if (ret) {
-               ath10k_warn("Peer assoc prepare failed for %pM vdev %i\n: %d",
+               ath10k_warn("failed to prepare peer assoc for %pM vdev %i: %d\n",
                            bss_conf->bssid, arvif->vdev_id, ret);
                rcu_read_unlock();
                return;
@@ -1452,7 +1500,7 @@ static void ath10k_bss_assoc(struct ieee80211_hw *hw,
 
        ret = ath10k_wmi_peer_assoc(ar, &peer_arg);
        if (ret) {
-               ath10k_warn("Peer assoc failed for %pM vdev %i\n: %d",
+               ath10k_warn("failed to run peer assoc for %pM vdev %i: %d\n",
                            bss_conf->bssid, arvif->vdev_id, ret);
                return;
        }
@@ -1473,7 +1521,7 @@ static void ath10k_bss_assoc(struct ieee80211_hw *hw,
 
        ret = ath10k_wmi_vdev_up(ar, arvif->vdev_id, arvif->aid, arvif->bssid);
        if (ret) {
-               ath10k_warn("VDEV: %d up failed: ret %d\n",
+               ath10k_warn("failed to set vdev %d up: %d\n",
                            arvif->vdev_id, ret);
                return;
        }
@@ -1524,7 +1572,7 @@ static void ath10k_bss_disassoc(struct ieee80211_hw *hw,
 }
 
 static int ath10k_station_assoc(struct ath10k *ar, struct ath10k_vif *arvif,
-                               struct ieee80211_sta *sta)
+                               struct ieee80211_sta *sta, bool reassoc)
 {
        struct wmi_peer_assoc_complete_arg peer_arg;
        int ret = 0;
@@ -1533,34 +1581,46 @@ static int ath10k_station_assoc(struct ath10k *ar, struct ath10k_vif *arvif,
 
        ret = ath10k_peer_assoc_prepare(ar, arvif, sta, NULL, &peer_arg);
        if (ret) {
-               ath10k_warn("WMI peer assoc prepare failed for %pM vdev %i: %i\n",
+               ath10k_warn("failed to prepare WMI peer assoc for %pM vdev %i: %i\n",
                            sta->addr, arvif->vdev_id, ret);
                return ret;
        }
 
+       peer_arg.peer_reassoc = reassoc;
        ret = ath10k_wmi_peer_assoc(ar, &peer_arg);
        if (ret) {
-               ath10k_warn("Peer assoc failed for STA %pM vdev %i: %d\n",
+               ath10k_warn("failed to run peer assoc for STA %pM vdev %i: %d\n",
                            sta->addr, arvif->vdev_id, ret);
                return ret;
        }
 
        ret = ath10k_setup_peer_smps(ar, arvif, sta->addr, &sta->ht_cap);
        if (ret) {
-               ath10k_warn("failed to setup peer SMPS for vdev: %d\n", ret);
+               ath10k_warn("failed to setup peer SMPS for vdev %d: %d\n",
+                           arvif->vdev_id, ret);
                return ret;
        }
 
+       if (!sta->wme) {
+               arvif->num_legacy_stations++;
+               ret  = ath10k_recalc_rtscts_prot(arvif);
+               if (ret) {
+                       ath10k_warn("failed to recalculate rts/cts prot for vdev %d: %d\n",
+                                   arvif->vdev_id, ret);
+                       return ret;
+               }
+       }
+
        ret = ath10k_install_peer_wep_keys(arvif, sta->addr);
        if (ret) {
-               ath10k_warn("could not install peer wep keys for vdev %i: %d\n",
+               ath10k_warn("failed to install peer wep keys for vdev %i: %d\n",
                            arvif->vdev_id, ret);
                return ret;
        }
 
        ret = ath10k_peer_assoc_qos_ap(ar, arvif, sta);
        if (ret) {
-               ath10k_warn("could not set qos params for STA %pM for vdev %i: %d\n",
+               ath10k_warn("failed to set qos params for STA %pM for vdev %i: %d\n",
                            sta->addr, arvif->vdev_id, ret);
                return ret;
        }
@@ -1575,9 +1635,19 @@ static int ath10k_station_disassoc(struct ath10k *ar, struct ath10k_vif *arvif,
 
        lockdep_assert_held(&ar->conf_mutex);
 
+       if (!sta->wme) {
+               arvif->num_legacy_stations--;
+               ret = ath10k_recalc_rtscts_prot(arvif);
+               if (ret) {
+                       ath10k_warn("failed to recalculate rts/cts prot for vdev %d: %d\n",
+                                   arvif->vdev_id, ret);
+                       return ret;
+               }
+       }
+
        ret = ath10k_clear_peer_keys(arvif, sta->addr);
        if (ret) {
-               ath10k_warn("could not clear all peer wep keys for vdev %i: %d\n",
+               ath10k_warn("failed to clear all peer wep keys for vdev %i: %d\n",
                            arvif->vdev_id, ret);
                return ret;
        }
@@ -1685,19 +1755,44 @@ static int ath10k_update_channel_list(struct ath10k *ar)
        return ret;
 }
 
+static enum wmi_dfs_region
+ath10k_mac_get_dfs_region(enum nl80211_dfs_regions dfs_region)
+{
+       switch (dfs_region) {
+       case NL80211_DFS_UNSET:
+               return WMI_UNINIT_DFS_DOMAIN;
+       case NL80211_DFS_FCC:
+               return WMI_FCC_DFS_DOMAIN;
+       case NL80211_DFS_ETSI:
+               return WMI_ETSI_DFS_DOMAIN;
+       case NL80211_DFS_JP:
+               return WMI_MKK4_DFS_DOMAIN;
+       }
+       return WMI_UNINIT_DFS_DOMAIN;
+}
+
 static void ath10k_regd_update(struct ath10k *ar)
 {
        struct reg_dmn_pair_mapping *regpair;
        int ret;
+       enum wmi_dfs_region wmi_dfs_reg;
+       enum nl80211_dfs_regions nl_dfs_reg;
 
        lockdep_assert_held(&ar->conf_mutex);
 
        ret = ath10k_update_channel_list(ar);
        if (ret)
-               ath10k_warn("could not update channel list (%d)\n", ret);
+               ath10k_warn("failed to update channel list: %d\n", ret);
 
        regpair = ar->ath_common.regulatory.regpair;
 
+       if (config_enabled(CONFIG_ATH10K_DFS_CERTIFIED) && ar->dfs_detector) {
+               nl_dfs_reg = ar->dfs_detector->region;
+               wmi_dfs_reg = ath10k_mac_get_dfs_region(nl_dfs_reg);
+       } else {
+               wmi_dfs_reg = WMI_UNINIT_DFS_DOMAIN;
+       }
+
        /* Target allows setting up per-band regdomain but ath_common provides
         * a combined one only */
        ret = ath10k_wmi_pdev_set_regdomain(ar,
@@ -1705,9 +1800,10 @@ static void ath10k_regd_update(struct ath10k *ar)
                                            regpair->reg_domain, /* 2ghz */
                                            regpair->reg_domain, /* 5ghz */
                                            regpair->reg_2ghz_ctl,
-                                           regpair->reg_5ghz_ctl);
+                                           regpair->reg_5ghz_ctl,
+                                           wmi_dfs_reg);
        if (ret)
-               ath10k_warn("could not set pdev regdomain (%d)\n", ret);
+               ath10k_warn("failed to set pdev regdomain: %d\n", ret);
 }
 
 static void ath10k_reg_notifier(struct wiphy *wiphy,
@@ -1725,7 +1821,7 @@ static void ath10k_reg_notifier(struct wiphy *wiphy,
                result = ar->dfs_detector->set_dfs_domain(ar->dfs_detector,
                                                          request->dfs_region);
                if (!result)
-                       ath10k_warn("dfs region 0x%X not supported, will trigger radar for every pulse\n",
+                       ath10k_warn("DFS region 0x%X not supported, will trigger radar for every pulse\n",
                                    request->dfs_region);
        }
 
@@ -1759,10 +1855,10 @@ static u8 ath10k_tx_h_get_vdev_id(struct ath10k *ar,
        if (info->control.vif)
                return ath10k_vif_to_arvif(info->control.vif)->vdev_id;
 
-       if (ar->monitor_enabled)
+       if (ar->monitor_started)
                return ar->monitor_vdev_id;
 
-       ath10k_warn("could not resolve vdev id\n");
+       ath10k_warn("failed to resolve vdev id\n");
        return 0;
 }
 
@@ -1803,7 +1899,9 @@ static void ath10k_tx_wep_key_work(struct work_struct *work)
                                        arvif->ar->wmi.vdev_param->def_keyid,
                                        keyidx);
        if (ret) {
-               ath10k_warn("could not update wep keyidx (%d)\n", ret);
+               ath10k_warn("failed to update wep key index for vdev %d: %d\n",
+                           arvif->vdev_id,
+                           ret);
                return;
        }
 
@@ -1879,7 +1977,7 @@ static void ath10k_tx_htt(struct ath10k *ar, struct sk_buff *skb)
                             ar->fw_features)) {
                        if (skb_queue_len(&ar->wmi_mgmt_tx_queue) >=
                            ATH10K_MAX_NUM_MGMT_PENDING) {
-                               ath10k_warn("wmi mgmt_tx queue limit reached\n");
+                               ath10k_warn("reached WMI management transmit queue limit\n");
                                ret = -EBUSY;
                                goto exit;
                        }
@@ -1903,7 +2001,7 @@ static void ath10k_tx_htt(struct ath10k *ar, struct sk_buff *skb)
 
 exit:
        if (ret) {
-               ath10k_warn("tx failed (%d). dropping packet.\n", ret);
+               ath10k_warn("failed to transmit packet, dropping: %d\n", ret);
                ieee80211_free_txskb(ar->hw, skb);
        }
 }
@@ -1964,7 +2062,7 @@ void ath10k_offchan_tx_work(struct work_struct *work)
                if (!peer) {
                        ret = ath10k_peer_create(ar, vdev_id, peer_addr);
                        if (ret)
-                               ath10k_warn("peer %pM on vdev %d not created (%d)\n",
+                               ath10k_warn("failed to create peer %pM on vdev %d: %d\n",
                                            peer_addr, vdev_id, ret);
                }
 
@@ -1984,7 +2082,7 @@ void ath10k_offchan_tx_work(struct work_struct *work)
                if (!peer) {
                        ret = ath10k_peer_delete(ar, vdev_id, peer_addr);
                        if (ret)
-                               ath10k_warn("peer %pM on vdev %d not deleted (%d)\n",
+                               ath10k_warn("failed to delete peer %pM on vdev %d: %d\n",
                                            peer_addr, vdev_id, ret);
                }
 
@@ -2018,7 +2116,8 @@ void ath10k_mgmt_over_wmi_tx_work(struct work_struct *work)
 
                ret = ath10k_wmi_mgmt_tx(ar, skb);
                if (ret) {
-                       ath10k_warn("wmi mgmt_tx failed (%d)\n", ret);
+                       ath10k_warn("failed to transmit management frame via WMI: %d\n",
+                                   ret);
                        ieee80211_free_txskb(ar->hw, skb);
                }
        }
@@ -2043,7 +2142,7 @@ void ath10k_reset_scan(unsigned long ptr)
                return;
        }
 
-       ath10k_warn("scan timeout. resetting. fw issue?\n");
+       ath10k_warn("scan timed out, firmware problem?\n");
 
        if (ar->scan.is_roc)
                ieee80211_remain_on_channel_expired(ar->hw);
@@ -2079,7 +2178,7 @@ static int ath10k_abort_scan(struct ath10k *ar)
 
        ret = ath10k_wmi_stop_scan(ar, &arg);
        if (ret) {
-               ath10k_warn("could not submit wmi stop scan (%d)\n", ret);
+               ath10k_warn("failed to stop wmi scan: %d\n", ret);
                spin_lock_bh(&ar->data_lock);
                ar->scan.in_progress = false;
                ath10k_offchan_tx_purge(ar);
@@ -2099,7 +2198,7 @@ static int ath10k_abort_scan(struct ath10k *ar)
 
        spin_lock_bh(&ar->data_lock);
        if (ar->scan.in_progress) {
-               ath10k_warn("could not stop scan. its still in progress\n");
+               ath10k_warn("failed to stop scan, it's still in progress\n");
                ar->scan.in_progress = false;
                ath10k_offchan_tx_purge(ar);
                ret = -ETIMEDOUT;
@@ -2194,7 +2293,13 @@ void ath10k_halt(struct ath10k *ar)
 {
        lockdep_assert_held(&ar->conf_mutex);
 
-       ath10k_stop_cac(ar);
+       if (ath10k_monitor_is_enabled(ar)) {
+               clear_bit(ATH10K_CAC_RUNNING, &ar->dev_flags);
+               ar->promisc = false;
+               ar->monitor = false;
+               ath10k_monitor_stop(ar);
+       }
+
        del_timer_sync(&ar->scan.timeout);
        ath10k_offchan_tx_purge(ar);
        ath10k_mgmt_over_wmi_tx_purge(ar);
@@ -2226,14 +2331,14 @@ static int ath10k_start(struct ieee80211_hw *hw)
 
        ret = ath10k_hif_power_up(ar);
        if (ret) {
-               ath10k_err("could not init hif (%d)\n", ret);
+               ath10k_err("Could not init hif: %d\n", ret);
                ar->state = ATH10K_STATE_OFF;
                goto exit;
        }
 
        ret = ath10k_core_start(ar);
        if (ret) {
-               ath10k_err("could not init core (%d)\n", ret);
+               ath10k_err("Could not init core: %d\n", ret);
                ath10k_hif_power_down(ar);
                ar->state = ATH10K_STATE_OFF;
                goto exit;
@@ -2246,13 +2351,11 @@ static int ath10k_start(struct ieee80211_hw *hw)
 
        ret = ath10k_wmi_pdev_set_param(ar, ar->wmi.pdev_param->pmf_qos, 1);
        if (ret)
-               ath10k_warn("could not enable WMI_PDEV_PARAM_PMF_QOS (%d)\n",
-                           ret);
+               ath10k_warn("failed to enable PMF QOS: %d\n", ret);
 
        ret = ath10k_wmi_pdev_set_param(ar, ar->wmi.pdev_param->dynamic_bw, 1);
        if (ret)
-               ath10k_warn("could not init WMI_PDEV_PARAM_DYNAMIC_BW (%d)\n",
-                           ret);
+               ath10k_warn("failed to enable dynamic BW: %d\n", ret);
 
        /*
         * By default FW set ARP frames ac to voice (6). In that case ARP
@@ -2266,11 +2369,12 @@ static int ath10k_start(struct ieee80211_hw *hw)
        ret = ath10k_wmi_pdev_set_param(ar,
                                        ar->wmi.pdev_param->arp_ac_override, 0);
        if (ret) {
-               ath10k_warn("could not set arp ac override parameter: %d\n",
+               ath10k_warn("failed to set arp ac override parameter: %d\n",
                            ret);
                goto exit;
        }
 
+       ar->num_started_vdevs = 0;
        ath10k_regd_update(ar);
        ret = 0;
 
@@ -2309,7 +2413,7 @@ static int ath10k_config_ps(struct ath10k *ar)
        list_for_each_entry(arvif, &ar->arvifs, list) {
                ret = ath10k_mac_vif_setup_ps(arvif);
                if (ret) {
-                       ath10k_warn("could not setup powersave (%d)\n", ret);
+                       ath10k_warn("failed to setup powersave: %d\n", ret);
                        break;
                }
        }
@@ -2343,7 +2447,6 @@ static const char *chandef_get_width(enum nl80211_chan_width width)
 static void ath10k_config_chan(struct ath10k *ar)
 {
        struct ath10k_vif *arvif;
-       bool monitor_was_enabled;
        int ret;
 
        lockdep_assert_held(&ar->conf_mutex);
@@ -2357,10 +2460,8 @@ static void ath10k_config_chan(struct ath10k *ar)
 
        /* First stop monitor interface. Some FW versions crash if there's a
         * lone monitor interface. */
-       monitor_was_enabled = ar->monitor_enabled;
-
-       if (ar->monitor_enabled)
-               ath10k_monitor_stop(ar);
+       if (ar->monitor_started)
+               ath10k_monitor_vdev_stop(ar);
 
        list_for_each_entry(arvif, &ar->arvifs, list) {
                if (!arvif->is_started)
@@ -2371,7 +2472,7 @@ static void ath10k_config_chan(struct ath10k *ar)
 
                ret = ath10k_vdev_stop(arvif);
                if (ret) {
-                       ath10k_warn("could not stop vdev %d (%d)\n",
+                       ath10k_warn("failed to stop vdev %d: %d\n",
                                    arvif->vdev_id, ret);
                        continue;
                }
@@ -2388,7 +2489,7 @@ static void ath10k_config_chan(struct ath10k *ar)
 
                ret = ath10k_vdev_start(arvif);
                if (ret) {
-                       ath10k_warn("could not start vdev %d (%d)\n",
+                       ath10k_warn("failed to start vdev %d: %d\n",
                                    arvif->vdev_id, ret);
                        continue;
                }
@@ -2399,14 +2500,14 @@ static void ath10k_config_chan(struct ath10k *ar)
                ret = ath10k_wmi_vdev_up(arvif->ar, arvif->vdev_id, arvif->aid,
                                         arvif->bssid);
                if (ret) {
-                       ath10k_warn("could not bring vdev up %d (%d)\n",
+                       ath10k_warn("failed to bring vdev up %d: %d\n",
                                    arvif->vdev_id, ret);
                        continue;
                }
        }
 
-       if (monitor_was_enabled)
-               ath10k_monitor_start(ar, ar->monitor_vdev_id);
+       if (ath10k_monitor_is_enabled(ar))
+               ath10k_monitor_vdev_start(ar, ar->monitor_vdev_id);
 }
 
 static int ath10k_config(struct ieee80211_hw *hw, u32 changed)
@@ -2420,15 +2521,17 @@ static int ath10k_config(struct ieee80211_hw *hw, u32 changed)
 
        if (changed & IEEE80211_CONF_CHANGE_CHANNEL) {
                ath10k_dbg(ATH10K_DBG_MAC,
-                          "mac config channel %d mhz flags 0x%x\n",
+                          "mac config channel %dMHz flags 0x%x radar %d\n",
                           conf->chandef.chan->center_freq,
-                          conf->chandef.chan->flags);
+                          conf->chandef.chan->flags,
+                          conf->radar_enabled);
 
                spin_lock_bh(&ar->data_lock);
                ar->rx_channel = conf->chandef.chan;
                spin_unlock_bh(&ar->data_lock);
 
-               ath10k_config_radar_detection(ar);
+               ar->radar_enabled = conf->radar_enabled;
+               ath10k_recalc_radar_detection(ar);
 
                if (!cfg80211_chandef_identical(&ar->chandef, &conf->chandef)) {
                        ar->chandef = conf->chandef;
@@ -2444,14 +2547,14 @@ static int ath10k_config(struct ieee80211_hw *hw, u32 changed)
                ret = ath10k_wmi_pdev_set_param(ar, param,
                                                hw->conf.power_level * 2);
                if (ret)
-                       ath10k_warn("mac failed to set 2g txpower %d (%d)\n",
+                       ath10k_warn("failed to set 2g txpower %d: %d\n",
                                    hw->conf.power_level, ret);
 
                param = ar->wmi.pdev_param->txpower_limit5g;
                ret = ath10k_wmi_pdev_set_param(ar, param,
                                                hw->conf.power_level * 2);
                if (ret)
-                       ath10k_warn("mac failed to set 5g txpower %d (%d)\n",
+                       ath10k_warn("failed to set 5g txpower %d: %d\n",
                                    hw->conf.power_level, ret);
        }
 
@@ -2459,10 +2562,19 @@ static int ath10k_config(struct ieee80211_hw *hw, u32 changed)
                ath10k_config_ps(ar);
 
        if (changed & IEEE80211_CONF_CHANGE_MONITOR) {
-               if (conf->flags & IEEE80211_CONF_MONITOR)
-                       ret = ath10k_monitor_create(ar);
-               else
-                       ret = ath10k_monitor_destroy(ar);
+               if (conf->flags & IEEE80211_CONF_MONITOR && !ar->monitor) {
+                       ar->monitor = true;
+                       ret = ath10k_monitor_start(ar);
+                       if (ret) {
+                               ath10k_warn("failed to start monitor (config): %d\n",
+                                           ret);
+                               ar->monitor = false;
+                       }
+               } else if (!(conf->flags & IEEE80211_CONF_MONITOR) &&
+                          ar->monitor) {
+                       ar->monitor = false;
+                       ath10k_monitor_stop(ar);
+               }
        }
 
        mutex_unlock(&ar->conf_mutex);
@@ -2497,12 +2609,6 @@ static int ath10k_add_interface(struct ieee80211_hw *hw,
        INIT_WORK(&arvif->wep_key_work, ath10k_tx_wep_key_work);
        INIT_LIST_HEAD(&arvif->list);
 
-       if ((vif->type == NL80211_IFTYPE_MONITOR) && ar->monitor_present) {
-               ath10k_warn("Only one monitor interface allowed\n");
-               ret = -EBUSY;
-               goto err;
-       }
-
        bit = ffs(ar->free_vdev_map);
        if (bit == 0) {
                ret = -EBUSY;
@@ -2545,7 +2651,7 @@ static int ath10k_add_interface(struct ieee80211_hw *hw,
        ret = ath10k_wmi_vdev_create(ar, arvif->vdev_id, arvif->vdev_type,
                                     arvif->vdev_subtype, vif->addr);
        if (ret) {
-               ath10k_warn("WMI vdev %i create failed: ret %d\n",
+               ath10k_warn("failed to create WMI vdev %i: %d\n",
                            arvif->vdev_id, ret);
                goto err;
        }
@@ -2557,7 +2663,7 @@ static int ath10k_add_interface(struct ieee80211_hw *hw,
        ret = ath10k_wmi_vdev_set_param(ar, 0, vdev_param,
                                        arvif->def_wep_key_idx);
        if (ret) {
-               ath10k_warn("Failed to set vdev %i default keyid: %d\n",
+               ath10k_warn("failed to set vdev %i default key id: %d\n",
                            arvif->vdev_id, ret);
                goto err_vdev_delete;
        }
@@ -2567,7 +2673,7 @@ static int ath10k_add_interface(struct ieee80211_hw *hw,
                                        ATH10K_HW_TXRX_NATIVE_WIFI);
        /* 10.X firmware does not support this VDEV parameter. Do not warn */
        if (ret && ret != -EOPNOTSUPP) {
-               ath10k_warn("Failed to set vdev %i TX encap: %d\n",
+               ath10k_warn("failed to set vdev %i TX encapsulation: %d\n",
                            arvif->vdev_id, ret);
                goto err_vdev_delete;
        }
@@ -2575,14 +2681,14 @@ static int ath10k_add_interface(struct ieee80211_hw *hw,
        if (arvif->vdev_type == WMI_VDEV_TYPE_AP) {
                ret = ath10k_peer_create(ar, arvif->vdev_id, vif->addr);
                if (ret) {
-                       ath10k_warn("Failed to create vdev %i peer for AP: %d\n",
+                       ath10k_warn("failed to create vdev %i peer for AP: %d\n",
                                    arvif->vdev_id, ret);
                        goto err_vdev_delete;
                }
 
                ret = ath10k_mac_set_kickout(arvif);
                if (ret) {
-                       ath10k_warn("Failed to set vdev %i kickout parameters: %d\n",
+                       ath10k_warn("failed to set vdev %i kickout parameters: %d\n",
                                    arvif->vdev_id, ret);
                        goto err_peer_delete;
                }
@@ -2594,7 +2700,7 @@ static int ath10k_add_interface(struct ieee80211_hw *hw,
                ret = ath10k_wmi_set_sta_ps_param(ar, arvif->vdev_id,
                                                  param, value);
                if (ret) {
-                       ath10k_warn("Failed to set vdev %i RX wake policy: %d\n",
+                       ath10k_warn("failed to set vdev %i RX wake policy: %d\n",
                                    arvif->vdev_id, ret);
                        goto err_peer_delete;
                }
@@ -2604,7 +2710,7 @@ static int ath10k_add_interface(struct ieee80211_hw *hw,
                ret = ath10k_wmi_set_sta_ps_param(ar, arvif->vdev_id,
                                                  param, value);
                if (ret) {
-                       ath10k_warn("Failed to set vdev %i TX wake thresh: %d\n",
+                       ath10k_warn("failed to set vdev %i TX wake thresh: %d\n",
                                    arvif->vdev_id, ret);
                        goto err_peer_delete;
                }
@@ -2614,7 +2720,7 @@ static int ath10k_add_interface(struct ieee80211_hw *hw,
                ret = ath10k_wmi_set_sta_ps_param(ar, arvif->vdev_id,
                                                  param, value);
                if (ret) {
-                       ath10k_warn("Failed to set vdev %i PSPOLL count: %d\n",
+                       ath10k_warn("failed to set vdev %i PSPOLL count: %d\n",
                                    arvif->vdev_id, ret);
                        goto err_peer_delete;
                }
@@ -2622,21 +2728,18 @@ static int ath10k_add_interface(struct ieee80211_hw *hw,
 
        ret = ath10k_mac_set_rts(arvif, ar->hw->wiphy->rts_threshold);
        if (ret) {
-               ath10k_warn("failed to set rts threshold for vdev %d (%d)\n",
+               ath10k_warn("failed to set rts threshold for vdev %d: %d\n",
                            arvif->vdev_id, ret);
                goto err_peer_delete;
        }
 
        ret = ath10k_mac_set_frag(arvif, ar->hw->wiphy->frag_threshold);
        if (ret) {
-               ath10k_warn("failed to set frag threshold for vdev %d (%d)\n",
+               ath10k_warn("failed to set frag threshold for vdev %d: %d\n",
                            arvif->vdev_id, ret);
                goto err_peer_delete;
        }
 
-       if (arvif->vdev_type == WMI_VDEV_TYPE_MONITOR)
-               ar->monitor_present = true;
-
        mutex_unlock(&ar->conf_mutex);
        return 0;
 
@@ -2679,7 +2782,7 @@ static void ath10k_remove_interface(struct ieee80211_hw *hw,
        if (arvif->vdev_type == WMI_VDEV_TYPE_AP) {
                ret = ath10k_peer_delete(arvif->ar, arvif->vdev_id, vif->addr);
                if (ret)
-                       ath10k_warn("Failed to remove peer for AP vdev %i: %d\n",
+                       ath10k_warn("failed to remove peer for AP vdev %i: %d\n",
                                    arvif->vdev_id, ret);
 
                kfree(arvif->u.ap.noa_data);
@@ -2690,12 +2793,9 @@ static void ath10k_remove_interface(struct ieee80211_hw *hw,
 
        ret = ath10k_wmi_vdev_delete(ar, arvif->vdev_id);
        if (ret)
-               ath10k_warn("WMI vdev %i delete failed: %d\n",
+               ath10k_warn("failed to delete WMI vdev %i: %d\n",
                            arvif->vdev_id, ret);
 
-       if (arvif->vdev_type == WMI_VDEV_TYPE_MONITOR)
-               ar->monitor_present = false;
-
        ath10k_peer_cleanup(ar, arvif->vdev_id);
 
        mutex_unlock(&ar->conf_mutex);
@@ -2728,28 +2828,17 @@ static void ath10k_configure_filter(struct ieee80211_hw *hw,
        *total_flags &= SUPPORTED_FILTERS;
        ar->filter_flags = *total_flags;
 
-       /* Monitor must not be started if it wasn't created first.
-        * Promiscuous mode may be started on a non-monitor interface - in
-        * such case the monitor vdev is not created so starting the
-        * monitor makes no sense. Since ath10k uses no special RX filters
-        * (only BSS filter in STA mode) there's no need for any special
-        * action here. */
-       if ((ar->filter_flags & FIF_PROMISC_IN_BSS) &&
-           !ar->monitor_enabled && ar->monitor_present) {
-               ath10k_dbg(ATH10K_DBG_MAC, "mac monitor %d start\n",
-                          ar->monitor_vdev_id);
-
-               ret = ath10k_monitor_start(ar, ar->monitor_vdev_id);
-               if (ret)
-                       ath10k_warn("Unable to start monitor mode\n");
-       } else if (!(ar->filter_flags & FIF_PROMISC_IN_BSS) &&
-                  ar->monitor_enabled && ar->monitor_present) {
-               ath10k_dbg(ATH10K_DBG_MAC, "mac monitor %d stop\n",
-                          ar->monitor_vdev_id);
-
-               ret = ath10k_monitor_stop(ar);
-               if (ret)
-                       ath10k_warn("Unable to stop monitor mode\n");
+       if (ar->filter_flags & FIF_PROMISC_IN_BSS && !ar->promisc) {
+               ar->promisc = true;
+               ret = ath10k_monitor_start(ar);
+               if (ret) {
+                       ath10k_warn("failed to start monitor (promisc): %d\n",
+                                   ret);
+                       ar->promisc = false;
+               }
+       } else if (!(ar->filter_flags & FIF_PROMISC_IN_BSS) && ar->promisc) {
+               ar->promisc = false;
+               ath10k_monitor_stop(ar);
        }
 
        mutex_unlock(&ar->conf_mutex);
@@ -2780,7 +2869,7 @@ static void ath10k_bss_info_changed(struct ieee80211_hw *hw,
                           arvif->vdev_id, arvif->beacon_interval);
 
                if (ret)
-                       ath10k_warn("Failed to set beacon interval for vdev %d: %i\n",
+                       ath10k_warn("failed to set beacon interval for vdev %d: %i\n",
                                    arvif->vdev_id, ret);
        }
 
@@ -2793,7 +2882,7 @@ static void ath10k_bss_info_changed(struct ieee80211_hw *hw,
                ret = ath10k_wmi_pdev_set_param(ar, pdev_param,
                                                WMI_BEACON_STAGGERED_MODE);
                if (ret)
-                       ath10k_warn("Failed to set beacon mode for vdev %d: %i\n",
+                       ath10k_warn("failed to set beacon mode for vdev %d: %i\n",
                                    arvif->vdev_id, ret);
        }
 
@@ -2808,7 +2897,7 @@ static void ath10k_bss_info_changed(struct ieee80211_hw *hw,
                ret = ath10k_wmi_vdev_set_param(ar, arvif->vdev_id, vdev_param,
                                                arvif->dtim_period);
                if (ret)
-                       ath10k_warn("Failed to set dtim period for vdev %d: %i\n",
+                       ath10k_warn("failed to set dtim period for vdev %d: %i\n",
                                    arvif->vdev_id, ret);
        }
 
@@ -2829,7 +2918,7 @@ static void ath10k_bss_info_changed(struct ieee80211_hw *hw,
                        ret = ath10k_peer_create(ar, arvif->vdev_id,
                                                 info->bssid);
                        if (ret)
-                               ath10k_warn("Failed to add peer %pM for vdev %d when changing bssid: %i\n",
+                               ath10k_warn("failed to add peer %pM for vdev %d when changing bssid: %i\n",
                                            info->bssid, arvif->vdev_id, ret);
 
                        if (vif->type == NL80211_IFTYPE_STATION) {
@@ -2868,20 +2957,13 @@ static void ath10k_bss_info_changed(struct ieee80211_hw *hw,
                ath10k_control_beaconing(arvif, info);
 
        if (changed & BSS_CHANGED_ERP_CTS_PROT) {
-               u32 cts_prot;
-               if (info->use_cts_prot)
-                       cts_prot = 1;
-               else
-                       cts_prot = 0;
-
+               arvif->use_cts_prot = info->use_cts_prot;
                ath10k_dbg(ATH10K_DBG_MAC, "mac vdev %d cts_prot %d\n",
-                          arvif->vdev_id, cts_prot);
+                          arvif->vdev_id, info->use_cts_prot);
 
-               vdev_param = ar->wmi.vdev_param->enable_rtscts;
-               ret = ath10k_wmi_vdev_set_param(ar, arvif->vdev_id, vdev_param,
-                                               cts_prot);
+               ret = ath10k_recalc_rtscts_prot(arvif);
                if (ret)
-                       ath10k_warn("Failed to set CTS prot for vdev %d: %d\n",
+                       ath10k_warn("failed to recalculate rts/cts prot for vdev %d: %d\n",
                                    arvif->vdev_id, ret);
        }
 
@@ -2900,7 +2982,7 @@ static void ath10k_bss_info_changed(struct ieee80211_hw *hw,
                ret = ath10k_wmi_vdev_set_param(ar, arvif->vdev_id, vdev_param,
                                                slottime);
                if (ret)
-                       ath10k_warn("Failed to set erp slot for vdev %d: %i\n",
+                       ath10k_warn("failed to set erp slot for vdev %d: %i\n",
                                    arvif->vdev_id, ret);
        }
 
@@ -2919,7 +3001,7 @@ static void ath10k_bss_info_changed(struct ieee80211_hw *hw,
                ret = ath10k_wmi_vdev_set_param(ar, arvif->vdev_id, vdev_param,
                                                preamble);
                if (ret)
-                       ath10k_warn("Failed to set preamble for vdev %d: %i\n",
+                       ath10k_warn("failed to set preamble for vdev %d: %i\n",
                                    arvif->vdev_id, ret);
        }
 
@@ -2990,7 +3072,7 @@ static int ath10k_hw_scan(struct ieee80211_hw *hw,
 
        ret = ath10k_start_scan(ar, &arg);
        if (ret) {
-               ath10k_warn("could not start hw scan (%d)\n", ret);
+               ath10k_warn("failed to start hw scan: %d\n", ret);
                spin_lock_bh(&ar->data_lock);
                ar->scan.in_progress = false;
                spin_unlock_bh(&ar->data_lock);
@@ -3010,8 +3092,7 @@ static void ath10k_cancel_hw_scan(struct ieee80211_hw *hw,
        mutex_lock(&ar->conf_mutex);
        ret = ath10k_abort_scan(ar);
        if (ret) {
-               ath10k_warn("couldn't abort scan (%d). forcefully sending scan completion to mac80211\n",
-                           ret);
+               ath10k_warn("failed to abort scan: %d\n", ret);
                ieee80211_scan_completed(hw, 1 /* aborted */);
        }
        mutex_unlock(&ar->conf_mutex);
@@ -3089,7 +3170,7 @@ static int ath10k_set_key(struct ieee80211_hw *hw, enum set_key_cmd cmd,
 
        if (!peer) {
                if (cmd == SET_KEY) {
-                       ath10k_warn("cannot install key for non-existent peer %pM\n",
+                       ath10k_warn("failed to install key for non-existent peer %pM\n",
                                    peer_addr);
                        ret = -EOPNOTSUPP;
                        goto exit;
@@ -3112,7 +3193,7 @@ static int ath10k_set_key(struct ieee80211_hw *hw, enum set_key_cmd cmd,
 
        ret = ath10k_install_key(arvif, key, cmd, peer_addr);
        if (ret) {
-               ath10k_warn("key installation failed for vdev %i peer %pM: %d\n",
+               ath10k_warn("failed to install key for vdev %i peer %pM: %d\n",
                            arvif->vdev_id, peer_addr, ret);
                goto exit;
        }
@@ -3127,7 +3208,7 @@ static int ath10k_set_key(struct ieee80211_hw *hw, enum set_key_cmd cmd,
                peer->keys[key->keyidx] = NULL;
        else if (peer == NULL)
                /* impossible unless FW goes crazy */
-               ath10k_warn("peer %pM disappeared!\n", peer_addr);
+               ath10k_warn("Peer %pM disappeared!\n", peer_addr);
        spin_unlock_bh(&ar->data_lock);
 
 exit:
@@ -3195,6 +3276,16 @@ static void ath10k_sta_rc_update_wk(struct work_struct *wk)
                                    sta->addr, smps, err);
        }
 
+       if (changed & IEEE80211_RC_SUPP_RATES_CHANGED) {
+               ath10k_dbg(ATH10K_DBG_MAC, "mac update sta %pM supp rates\n",
+                          sta->addr);
+
+               err = ath10k_station_assoc(ar, arvif, sta, true);
+               if (err)
+                       ath10k_warn("failed to reassociate station: %pM\n",
+                                   sta->addr);
+       }
+
        mutex_unlock(&ar->conf_mutex);
 }
 
@@ -3236,7 +3327,7 @@ static int ath10k_sta_state(struct ieee80211_hw *hw,
                        max_num_peers = TARGET_NUM_PEERS;
 
                if (ar->num_peers >= max_num_peers) {
-                       ath10k_warn("Number of peers exceeded: peers number %d (max peers %d)\n",
+                       ath10k_warn("number of peers exceeded: peers number %d (max peers %d)\n",
                                    ar->num_peers, max_num_peers);
                        ret = -ENOBUFS;
                        goto exit;
@@ -3248,7 +3339,7 @@ static int ath10k_sta_state(struct ieee80211_hw *hw,
 
                ret = ath10k_peer_create(ar, arvif->vdev_id, sta->addr);
                if (ret)
-                       ath10k_warn("Failed to add peer %pM for vdev %d when adding a new sta: %i\n",
+                       ath10k_warn("failed to add peer %pM for vdev %d when adding a new sta: %i\n",
                                    sta->addr, arvif->vdev_id, ret);
        } else if ((old_state == IEEE80211_STA_NONE &&
                    new_state == IEEE80211_STA_NOTEXIST)) {
@@ -3260,7 +3351,7 @@ static int ath10k_sta_state(struct ieee80211_hw *hw,
                           arvif->vdev_id, sta->addr);
                ret = ath10k_peer_delete(ar, arvif->vdev_id, sta->addr);
                if (ret)
-                       ath10k_warn("Failed to delete peer %pM for vdev %d: %i\n",
+                       ath10k_warn("failed to delete peer %pM for vdev %d: %i\n",
                                    sta->addr, arvif->vdev_id, ret);
 
                if (vif->type == NL80211_IFTYPE_STATION)
@@ -3275,9 +3366,9 @@ static int ath10k_sta_state(struct ieee80211_hw *hw,
                ath10k_dbg(ATH10K_DBG_MAC, "mac sta %pM associated\n",
                           sta->addr);
 
-               ret = ath10k_station_assoc(ar, arvif, sta);
+               ret = ath10k_station_assoc(ar, arvif, sta, false);
                if (ret)
-                       ath10k_warn("Failed to associate station %pM for vdev %i: %i\n",
+                       ath10k_warn("failed to associate station %pM for vdev %i: %i\n",
                                    sta->addr, arvif->vdev_id, ret);
        } else if (old_state == IEEE80211_STA_ASSOC &&
                   new_state == IEEE80211_STA_AUTH &&
@@ -3291,7 +3382,7 @@ static int ath10k_sta_state(struct ieee80211_hw *hw,
 
                ret = ath10k_station_disassoc(ar, arvif, sta);
                if (ret)
-                       ath10k_warn("Failed to disassociate station: %pM vdev %i ret %i\n",
+                       ath10k_warn("failed to disassociate station: %pM vdev %i: %i\n",
                                    sta->addr, arvif->vdev_id, ret);
        }
 exit:
@@ -3339,7 +3430,7 @@ static int ath10k_conf_tx_uapsd(struct ath10k *ar, struct ieee80211_vif *vif,
                                          WMI_STA_PS_PARAM_UAPSD,
                                          arvif->u.sta.uapsd);
        if (ret) {
-               ath10k_warn("could not set uapsd params %d\n", ret);
+               ath10k_warn("failed to set uapsd params: %d\n", ret);
                goto exit;
        }
 
@@ -3352,7 +3443,7 @@ static int ath10k_conf_tx_uapsd(struct ath10k *ar, struct ieee80211_vif *vif,
                                          WMI_STA_PS_PARAM_RX_WAKE_POLICY,
                                          value);
        if (ret)
-               ath10k_warn("could not set rx wake param %d\n", ret);
+               ath10k_warn("failed to set rx wake param: %d\n", ret);
 
 exit:
        return ret;
@@ -3402,13 +3493,13 @@ static int ath10k_conf_tx(struct ieee80211_hw *hw,
        /* FIXME: FW accepts wmm params per hw, not per vif */
        ret = ath10k_wmi_pdev_set_wmm_params(ar, &ar->wmm_params);
        if (ret) {
-               ath10k_warn("could not set wmm params %d\n", ret);
+               ath10k_warn("failed to set wmm params: %d\n", ret);
                goto exit;
        }
 
        ret = ath10k_conf_tx_uapsd(ar, vif, ac, params->uapsd);
        if (ret)
-               ath10k_warn("could not set sta uapsd %d\n", ret);
+               ath10k_warn("failed to set sta uapsd: %d\n", ret);
 
 exit:
        mutex_unlock(&ar->conf_mutex);
@@ -3461,7 +3552,7 @@ static int ath10k_remain_on_channel(struct ieee80211_hw *hw,
 
        ret = ath10k_start_scan(ar, &arg);
        if (ret) {
-               ath10k_warn("could not start roc scan (%d)\n", ret);
+               ath10k_warn("failed to start roc scan: %d\n", ret);
                spin_lock_bh(&ar->data_lock);
                ar->scan.in_progress = false;
                spin_unlock_bh(&ar->data_lock);
@@ -3470,7 +3561,7 @@ static int ath10k_remain_on_channel(struct ieee80211_hw *hw,
 
        ret = wait_for_completion_timeout(&ar->scan.on_channel, 3*HZ);
        if (ret == 0) {
-               ath10k_warn("could not switch to channel for roc scan\n");
+               ath10k_warn("failed to switch to channel for roc scan\n");
                ath10k_abort_scan(ar);
                ret = -ETIMEDOUT;
                goto exit;
@@ -3511,7 +3602,7 @@ static int ath10k_set_rts_threshold(struct ieee80211_hw *hw, u32 value)
 
                ret = ath10k_mac_set_rts(arvif, value);
                if (ret) {
-                       ath10k_warn("could not set rts threshold for vdev %d (%d)\n",
+                       ath10k_warn("failed to set rts threshold for vdev %d: %d\n",
                                    arvif->vdev_id, ret);
                        break;
                }
@@ -3534,7 +3625,7 @@ static int ath10k_set_frag_threshold(struct ieee80211_hw *hw, u32 value)
 
                ret = ath10k_mac_set_rts(arvif, value);
                if (ret) {
-                       ath10k_warn("could not set fragmentation threshold for vdev %d (%d)\n",
+                       ath10k_warn("failed to set fragmentation threshold for vdev %d: %d\n",
                                    arvif->vdev_id, ret);
                        break;
                }
@@ -3544,7 +3635,8 @@ static int ath10k_set_frag_threshold(struct ieee80211_hw *hw, u32 value)
        return ret;
 }
 
-static void ath10k_flush(struct ieee80211_hw *hw, u32 queues, bool drop)
+static void ath10k_flush(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
+                        u32 queues, bool drop)
 {
        struct ath10k *ar = hw->priv;
        bool skip;
@@ -3573,7 +3665,7 @@ static void ath10k_flush(struct ieee80211_hw *hw, u32 queues, bool drop)
                }), ATH10K_FLUSH_TIMEOUT_HZ);
 
        if (ret <= 0 || skip)
-               ath10k_warn("tx not flushed (skip %i ar-state %i): %i\n",
+               ath10k_warn("failed to flush transmit queue (skip %i ar-state %i): %i\n",
                            skip, ar->state, ret);
 
 skip:
@@ -3608,7 +3700,7 @@ static int ath10k_suspend(struct ieee80211_hw *hw,
 
        ret = ath10k_hif_suspend(ar);
        if (ret) {
-               ath10k_warn("could not suspend hif (%d)\n", ret);
+               ath10k_warn("failed to suspend hif: %d\n", ret);
                goto resume;
        }
 
@@ -3617,7 +3709,7 @@ static int ath10k_suspend(struct ieee80211_hw *hw,
 resume:
        ret = ath10k_wmi_pdev_resume_target(ar);
        if (ret)
-               ath10k_warn("could not resume target (%d)\n", ret);
+               ath10k_warn("failed to resume target: %d\n", ret);
 
        ret = 1;
 exit:
@@ -3634,14 +3726,14 @@ static int ath10k_resume(struct ieee80211_hw *hw)
 
        ret = ath10k_hif_resume(ar);
        if (ret) {
-               ath10k_warn("could not resume hif (%d)\n", ret);
+               ath10k_warn("failed to resume hif: %d\n", ret);
                ret = 1;
                goto exit;
        }
 
        ret = ath10k_wmi_pdev_resume_target(ar);
        if (ret) {
-               ath10k_warn("could not resume target (%d)\n", ret);
+               ath10k_warn("failed to resume target: %d\n", ret);
                ret = 1;
                goto exit;
        }
@@ -3964,7 +4056,7 @@ static int ath10k_set_fixed_rate_param(struct ath10k_vif *arvif,
        ret = ath10k_wmi_vdev_set_param(ar, arvif->vdev_id,
                                        vdev_param, fixed_rate);
        if (ret) {
-               ath10k_warn("Could not set fixed_rate param 0x%02x: %d\n",
+               ath10k_warn("failed to set fixed rate param 0x%02x: %d\n",
                            fixed_rate, ret);
                ret = -EINVAL;
                goto exit;
@@ -3977,7 +4069,7 @@ static int ath10k_set_fixed_rate_param(struct ath10k_vif *arvif,
                                        vdev_param, fixed_nss);
 
        if (ret) {
-               ath10k_warn("Could not set fixed_nss param %d: %d\n",
+               ath10k_warn("failed to set fixed nss param %d: %d\n",
                            fixed_nss, ret);
                ret = -EINVAL;
                goto exit;
@@ -3990,7 +4082,7 @@ static int ath10k_set_fixed_rate_param(struct ath10k_vif *arvif,
                                        force_sgi);
 
        if (ret) {
-               ath10k_warn("Could not set sgi param %d: %d\n",
+               ath10k_warn("failed to set sgi param %d: %d\n",
                            force_sgi, ret);
                ret = -EINVAL;
                goto exit;
@@ -4026,7 +4118,7 @@ static int ath10k_set_bitrate_mask(struct ieee80211_hw *hw,
        }
 
        if (fixed_rate == WMI_FIXED_RATE_NONE && force_sgi) {
-               ath10k_warn("Could not force SGI usage for default rate settings\n");
+               ath10k_warn("failed to force SGI usage for default rate settings\n");
                return -EINVAL;
        }
 
@@ -4072,8 +4164,8 @@ static void ath10k_sta_rc_update(struct ieee80211_hw *hw,
                        bw = WMI_PEER_CHWIDTH_80MHZ;
                        break;
                case IEEE80211_STA_RX_BW_160:
-                       ath10k_warn("mac sta rc update for %pM: invalid bw %d\n",
-                                   sta->addr, sta->bandwidth);
+                       ath10k_warn("Invalid bandwith %d in rc update for %pM\n",
+                                   sta->bandwidth, sta->addr);
                        bw = WMI_PEER_CHWIDTH_20MHZ;
                        break;
                }
@@ -4099,8 +4191,8 @@ static void ath10k_sta_rc_update(struct ieee80211_hw *hw,
                        smps = WMI_PEER_SMPS_DYNAMIC;
                        break;
                case IEEE80211_SMPS_NUM_MODES:
-                       ath10k_warn("mac sta rc update for %pM: invalid smps: %d\n",
-                                   sta->addr, sta->smps_mode);
+                       ath10k_warn("Invalid smps %d in sta rc update for %pM\n",
+                                   sta->smps_mode, sta->addr);
                        smps = WMI_PEER_SMPS_PS_NONE;
                        break;
                }
@@ -4108,15 +4200,6 @@ static void ath10k_sta_rc_update(struct ieee80211_hw *hw,
                arsta->smps = smps;
        }
 
-       if (changed & IEEE80211_RC_SUPP_RATES_CHANGED) {
-               /* FIXME: Not implemented. Probably the only way to do it would
-                * be to re-assoc the peer. */
-               changed &= ~IEEE80211_RC_SUPP_RATES_CHANGED;
-               ath10k_dbg(ATH10K_DBG_MAC,
-                          "mac sta rc update for %pM: changing supported rates not implemented\n",
-                          sta->addr);
-       }
-
        arsta->changed |= changed;
 
        spin_unlock_bh(&ar->data_lock);
@@ -4516,7 +4599,6 @@ int ath10k_mac_register(struct ath10k *ar)
                        IEEE80211_HW_REPORTS_TX_ACK_STATUS |
                        IEEE80211_HW_HAS_RATE_CONTROL |
                        IEEE80211_HW_SUPPORTS_STATIC_SMPS |
-                       IEEE80211_HW_WANT_MONITOR_VIF |
                        IEEE80211_HW_AP_LINK_PS |
                        IEEE80211_HW_SPECTRUM_MGMT;
 
@@ -4570,19 +4652,19 @@ int ath10k_mac_register(struct ath10k *ar)
                                                             NL80211_DFS_UNSET);
 
                if (!ar->dfs_detector)
-                       ath10k_warn("dfs pattern detector init failed\n");
+                       ath10k_warn("failed to initialise DFS pattern detector\n");
        }
 
        ret = ath_regd_init(&ar->ath_common.regulatory, ar->hw->wiphy,
                            ath10k_reg_notifier);
        if (ret) {
-               ath10k_err("Regulatory initialization failed: %i\n", ret);
+               ath10k_err("failed to initialise regulatory: %i\n", ret);
                goto err_free;
        }
 
        ret = ieee80211_register_hw(ar->hw);
        if (ret) {
-               ath10k_err("ieee80211 registration failed: %d\n", ret);
+               ath10k_err("failed to register ieee80211: %d\n", ret);
                goto err_free;
        }
 
index 9d242d801d9d354f772b74257826427f0a598180..bf1083d52e61d5f4c29bc6e649f357bf74c7ad51 100644 (file)
@@ -39,15 +39,27 @@ enum ath10k_pci_irq_mode {
        ATH10K_PCI_IRQ_MSI = 2,
 };
 
-static unsigned int ath10k_target_ps;
+enum ath10k_pci_reset_mode {
+       ATH10K_PCI_RESET_AUTO = 0,
+       ATH10K_PCI_RESET_WARM_ONLY = 1,
+};
+
+static unsigned int ath10k_pci_target_ps;
 static unsigned int ath10k_pci_irq_mode = ATH10K_PCI_IRQ_AUTO;
+static unsigned int ath10k_pci_reset_mode = ATH10K_PCI_RESET_AUTO;
 
-module_param(ath10k_target_ps, uint, 0644);
-MODULE_PARM_DESC(ath10k_target_ps, "Enable ath10k Target (SoC) PS option");
+module_param_named(target_ps, ath10k_pci_target_ps, uint, 0644);
+MODULE_PARM_DESC(target_ps, "Enable ath10k Target (SoC) PS option");
 
 module_param_named(irq_mode, ath10k_pci_irq_mode, uint, 0644);
 MODULE_PARM_DESC(irq_mode, "0: auto, 1: legacy, 2: msi (default: 0)");
 
+module_param_named(reset_mode, ath10k_pci_reset_mode, uint, 0644);
+MODULE_PARM_DESC(reset_mode, "0: auto, 1: warm only (default: 0)");
+
+/* how long wait to wait for target to initialise, in ms */
+#define ATH10K_PCI_TARGET_WAIT 3000
+
 #define QCA988X_2_0_DEVICE_ID  (0x003c)
 
 static DEFINE_PCI_DEVICE_TABLE(ath10k_pci_id_table) = {
@@ -346,9 +358,10 @@ static int ath10k_pci_diag_read_mem(struct ath10k *ar, u32 address, void *data,
         *   2) Buffer in DMA-able space
         */
        orig_nbytes = nbytes;
-       data_buf = (unsigned char *)pci_alloc_consistent(ar_pci->pdev,
-                                                        orig_nbytes,
-                                                        &ce_data_base);
+       data_buf = (unsigned char *)dma_alloc_coherent(ar->dev,
+                                                      orig_nbytes,
+                                                      &ce_data_base,
+                                                      GFP_ATOMIC);
 
        if (!data_buf) {
                ret = -ENOMEM;
@@ -442,12 +455,12 @@ done:
                                __le32_to_cpu(((__le32 *)data_buf)[i]);
                }
        } else
-               ath10k_dbg(ATH10K_DBG_PCI, "%s failure (0x%x)\n",
-                          __func__, address);
+               ath10k_warn("failed to read diag value at 0x%x: %d\n",
+                           address, ret);
 
        if (data_buf)
-               pci_free_consistent(ar_pci->pdev, orig_nbytes,
-                                   data_buf, ce_data_base);
+               dma_free_coherent(ar->dev, orig_nbytes, data_buf,
+                                 ce_data_base);
 
        return ret;
 }
@@ -490,9 +503,10 @@ static int ath10k_pci_diag_write_mem(struct ath10k *ar, u32 address,
         *   2) Buffer in DMA-able space
         */
        orig_nbytes = nbytes;
-       data_buf = (unsigned char *)pci_alloc_consistent(ar_pci->pdev,
-                                                        orig_nbytes,
-                                                        &ce_data_base);
+       data_buf = (unsigned char *)dma_alloc_coherent(ar->dev,
+                                                      orig_nbytes,
+                                                      &ce_data_base,
+                                                      GFP_ATOMIC);
        if (!data_buf) {
                ret = -ENOMEM;
                goto done;
@@ -588,13 +602,13 @@ static int ath10k_pci_diag_write_mem(struct ath10k *ar, u32 address,
 
 done:
        if (data_buf) {
-               pci_free_consistent(ar_pci->pdev, orig_nbytes, data_buf,
-                                   ce_data_base);
+               dma_free_coherent(ar->dev, orig_nbytes, data_buf,
+                                 ce_data_base);
        }
 
        if (ret != 0)
-               ath10k_dbg(ATH10K_DBG_PCI, "%s failure (0x%x)\n", __func__,
-                          address);
+               ath10k_warn("failed to write diag value at 0x%x: %d\n",
+                           address, ret);
 
        return ret;
 }
@@ -803,6 +817,9 @@ unlock:
 static u16 ath10k_pci_hif_get_free_queue_number(struct ath10k *ar, u8 pipe)
 {
        struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
+
+       ath10k_dbg(ATH10K_DBG_PCI, "pci hif get free queue number\n");
+
        return ath10k_ce_num_free_src_entries(ar_pci->pipe_info[pipe].ce_hdl);
 }
 
@@ -854,6 +871,8 @@ static void ath10k_pci_hif_dump_area(struct ath10k *ar)
 static void ath10k_pci_hif_send_complete_check(struct ath10k *ar, u8 pipe,
                                               int force)
 {
+       ath10k_dbg(ATH10K_DBG_PCI, "pci hif send complete check\n");
+
        if (!force) {
                int resources;
                /*
@@ -880,7 +899,7 @@ static void ath10k_pci_hif_set_callbacks(struct ath10k *ar,
 {
        struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
 
-       ath10k_dbg(ATH10K_DBG_PCI, "%s\n", __func__);
+       ath10k_dbg(ATH10K_DBG_PCI, "pci hif set callbacks\n");
 
        memcpy(&ar_pci->msg_callbacks_current, callbacks,
               sizeof(ar_pci->msg_callbacks_current));
@@ -938,6 +957,8 @@ static int ath10k_pci_hif_map_service_to_pipe(struct ath10k *ar,
 {
        int ret = 0;
 
+       ath10k_dbg(ATH10K_DBG_PCI, "pci hif map service\n");
+
        /* polling for received messages not supported */
        *dl_is_polled = 0;
 
@@ -997,6 +1018,8 @@ static void ath10k_pci_hif_get_default_pipe(struct ath10k *ar,
 {
        int ul_is_polled, dl_is_polled;
 
+       ath10k_dbg(ATH10K_DBG_PCI, "pci hif get default pipe\n");
+
        (void)ath10k_pci_hif_map_service_to_pipe(ar,
                                                 ATH10K_HTC_SVC_ID_RSVD_CTRL,
                                                 ul_pipe,
@@ -1098,6 +1121,8 @@ static int ath10k_pci_hif_start(struct ath10k *ar)
        struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
        int ret, ret_early;
 
+       ath10k_dbg(ATH10K_DBG_BOOT, "boot hif start\n");
+
        ath10k_pci_free_early_irq(ar);
        ath10k_pci_kill_tasklet(ar);
 
@@ -1233,18 +1258,10 @@ static void ath10k_pci_buffer_cleanup(struct ath10k *ar)
 
 static void ath10k_pci_ce_deinit(struct ath10k *ar)
 {
-       struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
-       struct ath10k_pci_pipe *pipe_info;
-       int pipe_num;
+       int i;
 
-       for (pipe_num = 0; pipe_num < CE_COUNT; pipe_num++) {
-               pipe_info = &ar_pci->pipe_info[pipe_num];
-               if (pipe_info->ce_hdl) {
-                       ath10k_ce_deinit(pipe_info->ce_hdl);
-                       pipe_info->ce_hdl = NULL;
-                       pipe_info->buf_sz = 0;
-               }
-       }
+       for (i = 0; i < CE_COUNT; i++)
+               ath10k_ce_deinit_pipe(ar, i);
 }
 
 static void ath10k_pci_hif_stop(struct ath10k *ar)
@@ -1252,7 +1269,7 @@ static void ath10k_pci_hif_stop(struct ath10k *ar)
        struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
        int ret;
 
-       ath10k_dbg(ATH10K_DBG_PCI, "%s\n", __func__);
+       ath10k_dbg(ATH10K_DBG_BOOT, "boot hif stop\n");
 
        ret = ath10k_ce_disable_interrupts(ar);
        if (ret)
@@ -1697,30 +1714,49 @@ static int ath10k_pci_init_config(struct ath10k *ar)
        return 0;
 }
 
+static int ath10k_pci_alloc_ce(struct ath10k *ar)
+{
+       int i, ret;
+
+       for (i = 0; i < CE_COUNT; i++) {
+               ret = ath10k_ce_alloc_pipe(ar, i, &host_ce_config_wlan[i]);
+               if (ret) {
+                       ath10k_err("failed to allocate copy engine pipe %d: %d\n",
+                                  i, ret);
+                       return ret;
+               }
+       }
 
+       return 0;
+}
+
+static void ath10k_pci_free_ce(struct ath10k *ar)
+{
+       int i;
+
+       for (i = 0; i < CE_COUNT; i++)
+               ath10k_ce_free_pipe(ar, i);
+}
 
 static int ath10k_pci_ce_init(struct ath10k *ar)
 {
        struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
        struct ath10k_pci_pipe *pipe_info;
        const struct ce_attr *attr;
-       int pipe_num;
+       int pipe_num, ret;
 
        for (pipe_num = 0; pipe_num < CE_COUNT; pipe_num++) {
                pipe_info = &ar_pci->pipe_info[pipe_num];
+               pipe_info->ce_hdl = &ar_pci->ce_states[pipe_num];
                pipe_info->pipe_num = pipe_num;
                pipe_info->hif_ce_state = ar;
                attr = &host_ce_config_wlan[pipe_num];
 
-               pipe_info->ce_hdl = ath10k_ce_init(ar, pipe_num, attr);
-               if (pipe_info->ce_hdl == NULL) {
-                       ath10k_err("failed to initialize CE for pipe: %d\n",
-                                  pipe_num);
-
-                       /* It is safe to call it here. It checks if ce_hdl is
-                        * valid for each pipe */
-                       ath10k_pci_ce_deinit(ar);
-                       return -1;
+               ret = ath10k_ce_init_pipe(ar, pipe_num, attr);
+               if (ret) {
+                       ath10k_err("failed to initialize copy engine pipe %d: %d\n",
+                                  pipe_num, ret);
+                       return ret;
                }
 
                if (pipe_num == CE_COUNT - 1) {
@@ -1741,16 +1777,15 @@ static int ath10k_pci_ce_init(struct ath10k *ar)
 static void ath10k_pci_fw_interrupt_handler(struct ath10k *ar)
 {
        struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
-       u32 fw_indicator_address, fw_indicator;
+       u32 fw_indicator;
 
        ath10k_pci_wake(ar);
 
-       fw_indicator_address = ar_pci->fw_indicator_address;
-       fw_indicator = ath10k_pci_read32(ar, fw_indicator_address);
+       fw_indicator = ath10k_pci_read32(ar, FW_INDICATOR_ADDRESS);
 
        if (fw_indicator & FW_IND_EVENT_PENDING) {
                /* ACK: clear Target-side pending event */
-               ath10k_pci_write32(ar, fw_indicator_address,
+               ath10k_pci_write32(ar, FW_INDICATOR_ADDRESS,
                                   fw_indicator & ~FW_IND_EVENT_PENDING);
 
                if (ar_pci->started) {
@@ -1769,11 +1804,10 @@ static void ath10k_pci_fw_interrupt_handler(struct ath10k *ar)
 
 static int ath10k_pci_warm_reset(struct ath10k *ar)
 {
-       struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
        int ret = 0;
        u32 val;
 
-       ath10k_dbg(ATH10K_DBG_BOOT, "boot performing warm chip reset\n");
+       ath10k_dbg(ATH10K_DBG_BOOT, "boot warm reset\n");
 
        ret = ath10k_do_pci_wake(ar);
        if (ret) {
@@ -1801,7 +1835,7 @@ static int ath10k_pci_warm_reset(struct ath10k *ar)
        msleep(100);
 
        /* clear fw indicator */
-       ath10k_pci_write32(ar, ar_pci->fw_indicator_address, 0);
+       ath10k_pci_write32(ar, FW_INDICATOR_ADDRESS, 0);
 
        /* clear target LF timer interrupts */
        val = ath10k_pci_read32(ar, RTC_SOC_BASE_ADDRESS +
@@ -1934,7 +1968,9 @@ static int __ath10k_pci_hif_power_up(struct ath10k *ar, bool cold_reset)
                irq_mode = "legacy";
 
        if (!test_bit(ATH10K_FLAG_FIRST_BOOT_DONE, &ar->dev_flags))
-               ath10k_info("pci irq %s\n", irq_mode);
+               ath10k_info("pci irq %s irq_mode %d reset_mode %d\n",
+                           irq_mode, ath10k_pci_irq_mode,
+                           ath10k_pci_reset_mode);
 
        return 0;
 
@@ -1956,6 +1992,8 @@ static int ath10k_pci_hif_power_up(struct ath10k *ar)
 {
        int ret;
 
+       ath10k_dbg(ATH10K_DBG_BOOT, "boot hif power up\n");
+
        /*
         * Hardware CUS232 version 2 has some issues with cold reset and the
         * preferred (and safer) way to perform a device reset is through a
@@ -1966,9 +2004,14 @@ static int ath10k_pci_hif_power_up(struct ath10k *ar)
         */
        ret = __ath10k_pci_hif_power_up(ar, false);
        if (ret) {
-               ath10k_warn("failed to power up target using warm reset (%d), trying cold reset\n",
+               ath10k_warn("failed to power up target using warm reset: %d\n",
                            ret);
 
+               if (ath10k_pci_reset_mode == ATH10K_PCI_RESET_WARM_ONLY)
+                       return ret;
+
+               ath10k_warn("trying cold reset\n");
+
                ret = __ath10k_pci_hif_power_up(ar, true);
                if (ret) {
                        ath10k_err("failed to power up target using cold reset too (%d)\n",
@@ -1984,12 +2027,14 @@ static void ath10k_pci_hif_power_down(struct ath10k *ar)
 {
        struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
 
+       ath10k_dbg(ATH10K_DBG_BOOT, "boot hif power down\n");
+
        ath10k_pci_free_early_irq(ar);
        ath10k_pci_kill_tasklet(ar);
        ath10k_pci_deinit_irq(ar);
+       ath10k_pci_ce_deinit(ar);
        ath10k_pci_warm_reset(ar);
 
-       ath10k_pci_ce_deinit(ar);
        if (!test_bit(ATH10K_PCI_FEATURE_SOC_POWER_SAVE, ar_pci->features))
                ath10k_do_pci_sleep(ar);
 }
@@ -2137,7 +2182,6 @@ static irqreturn_t ath10k_pci_interrupt_handler(int irq, void *arg)
 static void ath10k_pci_early_irq_tasklet(unsigned long data)
 {
        struct ath10k *ar = (struct ath10k *)data;
-       struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
        u32 fw_ind;
        int ret;
 
@@ -2148,9 +2192,9 @@ static void ath10k_pci_early_irq_tasklet(unsigned long data)
                return;
        }
 
-       fw_ind = ath10k_pci_read32(ar, ar_pci->fw_indicator_address);
+       fw_ind = ath10k_pci_read32(ar, FW_INDICATOR_ADDRESS);
        if (fw_ind & FW_IND_EVENT_PENDING) {
-               ath10k_pci_write32(ar, ar_pci->fw_indicator_address,
+               ath10k_pci_write32(ar, FW_INDICATOR_ADDRESS,
                                   fw_ind & ~FW_IND_EVENT_PENDING);
 
                /* Some structures are unavailable during early boot or at
@@ -2385,33 +2429,50 @@ static int ath10k_pci_deinit_irq(struct ath10k *ar)
 static int ath10k_pci_wait_for_target_init(struct ath10k *ar)
 {
        struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
-       int wait_limit = 300; /* 3 sec */
+       unsigned long timeout;
        int ret;
+       u32 val;
+
+       ath10k_dbg(ATH10K_DBG_BOOT, "boot waiting target to initialise\n");
 
        ret = ath10k_pci_wake(ar);
        if (ret) {
-               ath10k_err("failed to wake up target: %d\n", ret);
+               ath10k_err("failed to wake up target for init: %d\n", ret);
                return ret;
        }
 
-       while (wait_limit-- &&
-              !(ioread32(ar_pci->mem + FW_INDICATOR_ADDRESS) &
-                FW_IND_INITIALIZED)) {
+       timeout = jiffies + msecs_to_jiffies(ATH10K_PCI_TARGET_WAIT);
+
+       do {
+               val = ath10k_pci_read32(ar, FW_INDICATOR_ADDRESS);
+
+               ath10k_dbg(ATH10K_DBG_BOOT, "boot target indicator %x\n", val);
+
+               /* target should never return this */
+               if (val == 0xffffffff)
+                       continue;
+
+               if (val & FW_IND_INITIALIZED)
+                       break;
+
                if (ar_pci->num_msi_intrs == 0)
                        /* Fix potential race by repeating CORE_BASE writes */
-                       iowrite32(PCIE_INTR_FIRMWARE_MASK |
-                                 PCIE_INTR_CE_MASK_ALL,
-                                 ar_pci->mem + (SOC_CORE_BASE_ADDRESS |
-                                                PCIE_INTR_ENABLE_ADDRESS));
+                       ath10k_pci_soc_write32(ar, PCIE_INTR_ENABLE_ADDRESS,
+                                              PCIE_INTR_FIRMWARE_MASK |
+                                              PCIE_INTR_CE_MASK_ALL);
+
                mdelay(10);
-       }
+       } while (time_before(jiffies, timeout));
 
-       if (wait_limit < 0) {
-               ath10k_err("target stalled\n");
-               ret = -EIO;
+       if (val == 0xffffffff || !(val & FW_IND_INITIALIZED)) {
+               ath10k_err("failed to receive initialized event from target: %08x\n",
+                          val);
+               ret = -ETIMEDOUT;
                goto out;
        }
 
+       ath10k_dbg(ATH10K_DBG_BOOT, "boot target initialised\n");
+
 out:
        ath10k_pci_sleep(ar);
        return ret;
@@ -2422,6 +2483,8 @@ static int ath10k_pci_cold_reset(struct ath10k *ar)
        int i, ret;
        u32 val;
 
+       ath10k_dbg(ATH10K_DBG_BOOT, "boot cold reset\n");
+
        ret = ath10k_do_pci_wake(ar);
        if (ret) {
                ath10k_err("failed to wake up target: %d\n",
@@ -2453,6 +2516,9 @@ static int ath10k_pci_cold_reset(struct ath10k *ar)
        }
 
        ath10k_do_pci_sleep(ar);
+
+       ath10k_dbg(ATH10K_DBG_BOOT, "boot cold reset complete\n");
+
        return 0;
 }
 
@@ -2484,7 +2550,7 @@ static int ath10k_pci_probe(struct pci_dev *pdev,
        struct ath10k_pci *ar_pci;
        u32 lcr_val, chip_id;
 
-       ath10k_dbg(ATH10K_DBG_PCI, "%s\n", __func__);
+       ath10k_dbg(ATH10K_DBG_PCI, "pci probe\n");
 
        ar_pci = kzalloc(sizeof(*ar_pci), GFP_KERNEL);
        if (ar_pci == NULL)
@@ -2503,7 +2569,7 @@ static int ath10k_pci_probe(struct pci_dev *pdev,
                goto err_ar_pci;
        }
 
-       if (ath10k_target_ps)
+       if (ath10k_pci_target_ps)
                set_bit(ATH10K_PCI_FEATURE_SOC_POWER_SAVE, ar_pci->features);
 
        ath10k_pci_dump_features(ar_pci);
@@ -2516,7 +2582,6 @@ static int ath10k_pci_probe(struct pci_dev *pdev,
        }
 
        ar_pci->ar = ar;
-       ar_pci->fw_indicator_address = FW_INDICATOR_ADDRESS;
        atomic_set(&ar_pci->keep_awake_count, 0);
 
        pci_set_drvdata(pdev, ar);
@@ -2594,16 +2659,24 @@ static int ath10k_pci_probe(struct pci_dev *pdev,
 
        ath10k_do_pci_sleep(ar);
 
+       ret = ath10k_pci_alloc_ce(ar);
+       if (ret) {
+               ath10k_err("failed to allocate copy engine pipes: %d\n", ret);
+               goto err_iomap;
+       }
+
        ath10k_dbg(ATH10K_DBG_BOOT, "boot pci_mem 0x%p\n", ar_pci->mem);
 
        ret = ath10k_core_register(ar, chip_id);
        if (ret) {
                ath10k_err("failed to register driver core: %d\n", ret);
-               goto err_iomap;
+               goto err_free_ce;
        }
 
        return 0;
 
+err_free_ce:
+       ath10k_pci_free_ce(ar);
 err_iomap:
        pci_iounmap(pdev, mem);
 err_master:
@@ -2626,7 +2699,7 @@ static void ath10k_pci_remove(struct pci_dev *pdev)
        struct ath10k *ar = pci_get_drvdata(pdev);
        struct ath10k_pci *ar_pci;
 
-       ath10k_dbg(ATH10K_DBG_PCI, "%s\n", __func__);
+       ath10k_dbg(ATH10K_DBG_PCI, "pci remove\n");
 
        if (!ar)
                return;
@@ -2639,6 +2712,7 @@ static void ath10k_pci_remove(struct pci_dev *pdev)
        tasklet_kill(&ar_pci->msi_fw_err);
 
        ath10k_core_unregister(ar);
+       ath10k_pci_free_ce(ar);
 
        pci_iounmap(pdev, ar_pci->mem);
        pci_release_region(pdev, BAR_NUM);
@@ -2680,6 +2754,5 @@ module_exit(ath10k_pci_exit);
 MODULE_AUTHOR("Qualcomm Atheros");
 MODULE_DESCRIPTION("Driver support for Atheros QCA988X PCIe devices");
 MODULE_LICENSE("Dual BSD/GPL");
-MODULE_FIRMWARE(QCA988X_HW_2_0_FW_DIR "/" QCA988X_HW_2_0_FW_FILE);
-MODULE_FIRMWARE(QCA988X_HW_2_0_FW_DIR "/" QCA988X_HW_2_0_OTP_FILE);
+MODULE_FIRMWARE(QCA988X_HW_2_0_FW_DIR "/" QCA988X_HW_2_0_FW_2_FILE);
 MODULE_FIRMWARE(QCA988X_HW_2_0_FW_DIR "/" QCA988X_HW_2_0_BOARD_DATA_FILE);
index b43fdb4f731973544e1a55f700c779e4191d76bb..dfdebb4157aa177acde13ea1149b5a7490c5593e 100644 (file)
@@ -189,9 +189,6 @@ struct ath10k_pci {
 
        struct ath10k_hif_cb msg_callbacks_current;
 
-       /* Target address used to signal a pending firmware event */
-       u32 fw_indicator_address;
-
        /* Copy Engine used for Diagnostic Accesses */
        struct ath10k_ce_pipe *ce_diag;
 
index 0541dd939ce9d8be7dc7e195ef952526abab0255..82669a77e553b8f6902c7dc03f99e24b42a3dea6 100644 (file)
@@ -100,189 +100,6 @@ exit:
                wake_up(&htt->empty_tx_wq);
 }
 
-static const u8 rx_legacy_rate_idx[] = {
-       3,      /* 0x00  - 11Mbps  */
-       2,      /* 0x01  - 5.5Mbps */
-       1,      /* 0x02  - 2Mbps   */
-       0,      /* 0x03  - 1Mbps   */
-       3,      /* 0x04  - 11Mbps  */
-       2,      /* 0x05  - 5.5Mbps */
-       1,      /* 0x06  - 2Mbps   */
-       0,      /* 0x07  - 1Mbps   */
-       10,     /* 0x08  - 48Mbps  */
-       8,      /* 0x09  - 24Mbps  */
-       6,      /* 0x0A  - 12Mbps  */
-       4,      /* 0x0B  - 6Mbps   */
-       11,     /* 0x0C  - 54Mbps  */
-       9,      /* 0x0D  - 36Mbps  */
-       7,      /* 0x0E  - 18Mbps  */
-       5,      /* 0x0F  - 9Mbps   */
-};
-
-static void process_rx_rates(struct ath10k *ar, struct htt_rx_info *info,
-                            enum ieee80211_band band,
-                            struct ieee80211_rx_status *status)
-{
-       u8 cck, rate, rate_idx, bw, sgi, mcs, nss;
-       u8 info0 = info->rate.info0;
-       u32 info1 = info->rate.info1;
-       u32 info2 = info->rate.info2;
-       u8 preamble = 0;
-
-       /* Check if valid fields */
-       if (!(info0 & HTT_RX_INDICATION_INFO0_START_VALID))
-               return;
-
-       preamble = MS(info1, HTT_RX_INDICATION_INFO1_PREAMBLE_TYPE);
-
-       switch (preamble) {
-       case HTT_RX_LEGACY:
-               cck = info0 & HTT_RX_INDICATION_INFO0_LEGACY_RATE_CCK;
-               rate = MS(info0, HTT_RX_INDICATION_INFO0_LEGACY_RATE);
-               rate_idx = 0;
-
-               if (rate < 0x08 || rate > 0x0F)
-                       break;
-
-               switch (band) {
-               case IEEE80211_BAND_2GHZ:
-                       if (cck)
-                               rate &= ~BIT(3);
-                       rate_idx = rx_legacy_rate_idx[rate];
-                       break;
-               case IEEE80211_BAND_5GHZ:
-                       rate_idx = rx_legacy_rate_idx[rate];
-                       /* We are using same rate table registering
-                          HW - ath10k_rates[]. In case of 5GHz skip
-                          CCK rates, so -4 here */
-                       rate_idx -= 4;
-                       break;
-               default:
-                       break;
-               }
-
-               status->rate_idx = rate_idx;
-               break;
-       case HTT_RX_HT:
-       case HTT_RX_HT_WITH_TXBF:
-               /* HT-SIG - Table 20-11 in info1 and info2 */
-               mcs = info1 & 0x1F;
-               nss = mcs >> 3;
-               bw = (info1 >> 7) & 1;
-               sgi = (info2 >> 7) & 1;
-
-               status->rate_idx = mcs;
-               status->flag |= RX_FLAG_HT;
-               if (sgi)
-                       status->flag |= RX_FLAG_SHORT_GI;
-               if (bw)
-                       status->flag |= RX_FLAG_40MHZ;
-               break;
-       case HTT_RX_VHT:
-       case HTT_RX_VHT_WITH_TXBF:
-               /* VHT-SIG-A1 in info 1, VHT-SIG-A2 in info2
-                  TODO check this */
-               mcs = (info2 >> 4) & 0x0F;
-               nss = ((info1 >> 10) & 0x07) + 1;
-               bw = info1 & 3;
-               sgi = info2 & 1;
-
-               status->rate_idx = mcs;
-               status->vht_nss = nss;
-
-               if (sgi)
-                       status->flag |= RX_FLAG_SHORT_GI;
-
-               switch (bw) {
-               /* 20MHZ */
-               case 0:
-                       break;
-               /* 40MHZ */
-               case 1:
-                       status->flag |= RX_FLAG_40MHZ;
-                       break;
-               /* 80MHZ */
-               case 2:
-                       status->vht_flag |= RX_VHT_FLAG_80MHZ;
-               }
-
-               status->flag |= RX_FLAG_VHT;
-               break;
-       default:
-               break;
-       }
-}
-
-void ath10k_process_rx(struct ath10k *ar, struct htt_rx_info *info)
-{
-       struct ieee80211_rx_status *status;
-       struct ieee80211_channel *ch;
-       struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)info->skb->data;
-
-       status = IEEE80211_SKB_RXCB(info->skb);
-       memset(status, 0, sizeof(*status));
-
-       if (info->encrypt_type != HTT_RX_MPDU_ENCRYPT_NONE) {
-               status->flag |= RX_FLAG_DECRYPTED | RX_FLAG_IV_STRIPPED |
-                               RX_FLAG_MMIC_STRIPPED;
-               hdr->frame_control = __cpu_to_le16(
-                               __le16_to_cpu(hdr->frame_control) &
-                               ~IEEE80211_FCTL_PROTECTED);
-       }
-
-       if (info->mic_err)
-               status->flag |= RX_FLAG_MMIC_ERROR;
-
-       if (info->fcs_err)
-               status->flag |= RX_FLAG_FAILED_FCS_CRC;
-
-       if (info->amsdu_more)
-               status->flag |= RX_FLAG_AMSDU_MORE;
-
-       status->signal = info->signal;
-
-       spin_lock_bh(&ar->data_lock);
-       ch = ar->scan_channel;
-       if (!ch)
-               ch = ar->rx_channel;
-       spin_unlock_bh(&ar->data_lock);
-
-       if (!ch) {
-               ath10k_warn("no channel configured; ignoring frame!\n");
-               dev_kfree_skb_any(info->skb);
-               return;
-       }
-
-       process_rx_rates(ar, info, ch->band, status);
-       status->band = ch->band;
-       status->freq = ch->center_freq;
-
-       if (info->rate.info0 & HTT_RX_INDICATION_INFO0_END_VALID) {
-               /* TSF available only in 32-bit */
-               status->mactime = info->tsf & 0xffffffff;
-               status->flag |= RX_FLAG_MACTIME_END;
-       }
-
-       ath10k_dbg(ATH10K_DBG_DATA,
-                  "rx skb %p len %u %s%s%s%s%s %srate_idx %u vht_nss %u freq %u band %u flag 0x%x fcs-err %i\n",
-                  info->skb,
-                  info->skb->len,
-                  status->flag == 0 ? "legacy" : "",
-                  status->flag & RX_FLAG_HT ? "ht" : "",
-                  status->flag & RX_FLAG_VHT ? "vht" : "",
-                  status->flag & RX_FLAG_40MHZ ? "40" : "",
-                  status->vht_flag & RX_VHT_FLAG_80MHZ ? "80" : "",
-                  status->flag & RX_FLAG_SHORT_GI ? "sgi " : "",
-                  status->rate_idx,
-                  status->vht_nss,
-                  status->freq,
-                  status->band, status->flag, info->fcs_err);
-       ath10k_dbg_dump(ATH10K_DBG_HTT_DUMP, NULL, "rx skb: ",
-                       info->skb->data, info->skb->len);
-
-       ieee80211_rx(ar->hw, info->skb);
-}
-
 struct ath10k_peer *ath10k_peer_find(struct ath10k *ar, int vdev_id,
                                     const u8 *addr)
 {
index 356dc9c04c9e3981feaeb04d8df4e8f45fc36f7f..aee3e20058f814f2e7a774005d84d77c684442f4 100644 (file)
@@ -21,7 +21,6 @@
 
 void ath10k_txrx_tx_unref(struct ath10k_htt *htt,
                          const struct htt_tx_done *tx_done);
-void ath10k_process_rx(struct ath10k *ar, struct htt_rx_info *info);
 
 struct ath10k_peer *ath10k_peer_find(struct ath10k *ar, int vdev_id,
                                     const u8 *addr);
index cb1f7b5bcf4cdefa774411e09fa39b80728d82e4..fe4d5f1c672f56b621db21370fa79a70214584bb 100644 (file)
@@ -1362,13 +1362,10 @@ static void ath10k_wmi_event_host_swba(struct ath10k *ar, struct sk_buff *skb)
        struct sk_buff *bcn;
        int ret, vdev_id = 0;
 
-       ath10k_dbg(ATH10K_DBG_MGMT, "WMI_HOST_SWBA_EVENTID\n");
-
        ev = (struct wmi_host_swba_event *)skb->data;
        map = __le32_to_cpu(ev->vdev_map);
 
-       ath10k_dbg(ATH10K_DBG_MGMT, "host swba:\n"
-                  "-vdev map 0x%x\n",
+       ath10k_dbg(ATH10K_DBG_MGMT, "mgmt swba vdev_map 0x%x\n",
                   ev->vdev_map);
 
        for (; map; map >>= 1, vdev_id++) {
@@ -1385,12 +1382,7 @@ static void ath10k_wmi_event_host_swba(struct ath10k *ar, struct sk_buff *skb)
                bcn_info = &ev->bcn_info[i];
 
                ath10k_dbg(ATH10K_DBG_MGMT,
-                          "-bcn_info[%d]:\n"
-                          "--tim_len %d\n"
-                          "--tim_mcast %d\n"
-                          "--tim_changed %d\n"
-                          "--tim_num_ps_pending %d\n"
-                          "--tim_bitmap 0x%08x%08x%08x%08x\n",
+                          "mgmt event bcn_info %d tim_len %d mcast %d changed %d num_ps_pending %d bitmap 0x%08x%08x%08x%08x\n",
                           i,
                           __le32_to_cpu(bcn_info->tim_info.tim_len),
                           __le32_to_cpu(bcn_info->tim_info.tim_mcast),
@@ -2393,8 +2385,9 @@ int ath10k_wmi_connect_htc_service(struct ath10k *ar)
        return 0;
 }
 
-int ath10k_wmi_pdev_set_regdomain(struct ath10k *ar, u16 rd, u16 rd2g,
-                                 u16 rd5g, u16 ctl2g, u16 ctl5g)
+static int ath10k_wmi_main_pdev_set_regdomain(struct ath10k *ar, u16 rd,
+                                             u16 rd2g, u16 rd5g, u16 ctl2g,
+                                             u16 ctl5g)
 {
        struct wmi_pdev_set_regdomain_cmd *cmd;
        struct sk_buff *skb;
@@ -2418,6 +2411,46 @@ int ath10k_wmi_pdev_set_regdomain(struct ath10k *ar, u16 rd, u16 rd2g,
                                   ar->wmi.cmd->pdev_set_regdomain_cmdid);
 }
 
+static int ath10k_wmi_10x_pdev_set_regdomain(struct ath10k *ar, u16 rd,
+                                            u16 rd2g, u16 rd5g,
+                                            u16 ctl2g, u16 ctl5g,
+                                            enum wmi_dfs_region dfs_reg)
+{
+       struct wmi_pdev_set_regdomain_cmd_10x *cmd;
+       struct sk_buff *skb;
+
+       skb = ath10k_wmi_alloc_skb(sizeof(*cmd));
+       if (!skb)
+               return -ENOMEM;
+
+       cmd = (struct wmi_pdev_set_regdomain_cmd_10x *)skb->data;
+       cmd->reg_domain = __cpu_to_le32(rd);
+       cmd->reg_domain_2G = __cpu_to_le32(rd2g);
+       cmd->reg_domain_5G = __cpu_to_le32(rd5g);
+       cmd->conformance_test_limit_2G = __cpu_to_le32(ctl2g);
+       cmd->conformance_test_limit_5G = __cpu_to_le32(ctl5g);
+       cmd->dfs_domain = __cpu_to_le32(dfs_reg);
+
+       ath10k_dbg(ATH10K_DBG_WMI,
+                  "wmi pdev regdomain rd %x rd2g %x rd5g %x ctl2g %x ctl5g %x dfs_region %x\n",
+                  rd, rd2g, rd5g, ctl2g, ctl5g, dfs_reg);
+
+       return ath10k_wmi_cmd_send(ar, skb,
+                                  ar->wmi.cmd->pdev_set_regdomain_cmdid);
+}
+
+int ath10k_wmi_pdev_set_regdomain(struct ath10k *ar, u16 rd, u16 rd2g,
+                                 u16 rd5g, u16 ctl2g, u16 ctl5g,
+                                 enum wmi_dfs_region dfs_reg)
+{
+       if (test_bit(ATH10K_FW_FEATURE_WMI_10X, ar->fw_features))
+               return ath10k_wmi_10x_pdev_set_regdomain(ar, rd, rd2g, rd5g,
+                                                       ctl2g, ctl5g, dfs_reg);
+       else
+               return ath10k_wmi_main_pdev_set_regdomain(ar, rd, rd2g, rd5g,
+                                                        ctl2g, ctl5g);
+}
+
 int ath10k_wmi_pdev_set_channel(struct ath10k *ar,
                                const struct wmi_channel_arg *arg)
 {
@@ -3456,8 +3489,9 @@ int ath10k_wmi_peer_assoc(struct ath10k *ar,
                __cpu_to_le32(arg->peer_vht_rates.tx_mcs_set);
 
        ath10k_dbg(ATH10K_DBG_WMI,
-                  "wmi peer assoc vdev %d addr %pM\n",
-                  arg->vdev_id, arg->addr);
+                  "wmi peer assoc vdev %d addr %pM (%s)\n",
+                  arg->vdev_id, arg->addr,
+                  arg->peer_reassoc ? "reassociate" : "new");
        return ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->peer_assoc_cmdid);
 }
 
index 4fcc96aa9513b89a45e11fd9c2d8da79414204c9..ae838221af65d46efd817f18f5bb872034e1c5c5 100644 (file)
@@ -198,16 +198,6 @@ struct wmi_mac_addr {
        } __packed;
 } __packed;
 
-/* macro to convert MAC address from WMI word format to char array */
-#define WMI_MAC_ADDR_TO_CHAR_ARRAY(pwmi_mac_addr, c_macaddr) do { \
-       (c_macaddr)[0] =  ((pwmi_mac_addr)->word0) & 0xff; \
-       (c_macaddr)[1] = (((pwmi_mac_addr)->word0) >> 8) & 0xff; \
-       (c_macaddr)[2] = (((pwmi_mac_addr)->word0) >> 16) & 0xff; \
-       (c_macaddr)[3] = (((pwmi_mac_addr)->word0) >> 24) & 0xff; \
-       (c_macaddr)[4] =  ((pwmi_mac_addr)->word1) & 0xff; \
-       (c_macaddr)[5] = (((pwmi_mac_addr)->word1) >> 8) & 0xff; \
-       } while (0)
-
 struct wmi_cmd_map {
        u32 init_cmdid;
        u32 start_scan_cmdid;
@@ -2185,6 +2175,31 @@ struct wmi_pdev_set_regdomain_cmd {
        __le32 conformance_test_limit_5G;
 } __packed;
 
+enum wmi_dfs_region {
+       /* Uninitialized dfs domain */
+       WMI_UNINIT_DFS_DOMAIN = 0,
+
+       /* FCC3 dfs domain */
+       WMI_FCC_DFS_DOMAIN = 1,
+
+       /* ETSI dfs domain */
+       WMI_ETSI_DFS_DOMAIN = 2,
+
+       /*Japan dfs domain */
+       WMI_MKK4_DFS_DOMAIN = 3,
+};
+
+struct wmi_pdev_set_regdomain_cmd_10x {
+       __le32 reg_domain;
+       __le32 reg_domain_2G;
+       __le32 reg_domain_5G;
+       __le32 conformance_test_limit_2G;
+       __le32 conformance_test_limit_5G;
+
+       /* dfs domain from wmi_dfs_region */
+       __le32 dfs_domain;
+} __packed;
+
 /* Command to set/unset chip in quiet mode */
 struct wmi_pdev_set_quiet_cmd {
        /* period in TUs */
@@ -2210,6 +2225,19 @@ enum ath10k_protmode {
        ATH10K_PROT_RTSCTS   = 2,    /* RTS-CTS */
 };
 
+enum wmi_rtscts_profile {
+       WMI_RTSCTS_FOR_NO_RATESERIES = 0,
+       WMI_RTSCTS_FOR_SECOND_RATESERIES,
+       WMI_RTSCTS_ACROSS_SW_RETRIES
+};
+
+#define WMI_RTSCTS_ENABLED             1
+#define WMI_RTSCTS_SET_MASK            0x0f
+#define WMI_RTSCTS_SET_LSB             0
+
+#define WMI_RTSCTS_PROFILE_MASK                0xf0
+#define WMI_RTSCTS_PROFILE_LSB         4
+
 enum wmi_beacon_gen_mode {
        WMI_BEACON_STAGGERED_MODE = 0,
        WMI_BEACON_BURST_MODE = 1
@@ -2682,6 +2710,9 @@ struct wal_dbg_tx_stats {
        /* wal pdev resets  */
        __le32 pdev_resets;
 
+       /* frames dropped due to non-availability of stateless TIDs */
+       __le32 stateless_tid_alloc_failure;
+
        __le32 phy_underrun;
 
        /* MPDU is more than txop limit */
@@ -2738,13 +2769,21 @@ enum wmi_stats_id {
        WMI_REQUEST_AP_STAT     = 0x02
 };
 
+struct wlan_inst_rssi_args {
+       __le16 cfg_retry_count;
+       __le16 retry_count;
+};
+
 struct wmi_request_stats_cmd {
        __le32 stats_id;
 
-       /*
-        * Space to add parameters like
-        * peer mac addr
-        */
+       __le32 vdev_id;
+
+       /* peer MAC address */
+       struct wmi_mac_addr peer_macaddr;
+
+       /* Instantaneous RSSI arguments */
+       struct wlan_inst_rssi_args inst_rssi_args;
 } __packed;
 
 /* Suspend option */
@@ -2795,7 +2834,7 @@ struct wmi_stats_event {
  * PDEV statistics
  * TODO: add all PDEV stats here
  */
-struct wmi_pdev_stats {
+struct wmi_pdev_stats_old {
        __le32 chan_nf;        /* Channel noise floor */
        __le32 tx_frame_count; /* TX frame count */
        __le32 rx_frame_count; /* RX frame count */
@@ -2806,6 +2845,23 @@ struct wmi_pdev_stats {
        struct wal_dbg_stats wal; /* WAL dbg stats */
 } __packed;
 
+struct wmi_pdev_stats_10x {
+       __le32 chan_nf;        /* Channel noise floor */
+       __le32 tx_frame_count; /* TX frame count */
+       __le32 rx_frame_count; /* RX frame count */
+       __le32 rx_clear_count; /* rx clear count */
+       __le32 cycle_count;    /* cycle count */
+       __le32 phy_err_count;  /* Phy error count */
+       __le32 chan_tx_pwr;    /* channel tx power */
+       struct wal_dbg_stats wal; /* WAL dbg stats */
+       __le32 ack_rx_bad;
+       __le32 rts_bad;
+       __le32 rts_good;
+       __le32 fcs_bad;
+       __le32 no_beacons;
+       __le32 mib_int_count;
+} __packed;
+
 /*
  * VDEV statistics
  * TODO: add all VDEV stats here
@@ -2818,10 +2874,17 @@ struct wmi_vdev_stats {
  * peer statistics.
  * TODO: add more stats
  */
-struct wmi_peer_stats {
+struct wmi_peer_stats_old {
+       struct wmi_mac_addr peer_macaddr;
+       __le32 peer_rssi;
+       __le32 peer_tx_rate;
+} __packed;
+
+struct wmi_peer_stats_10x {
        struct wmi_mac_addr peer_macaddr;
        __le32 peer_rssi;
        __le32 peer_tx_rate;
+       __le32 peer_rx_rate;
 } __packed;
 
 struct wmi_vdev_create_cmd {
@@ -4202,7 +4265,8 @@ int ath10k_wmi_pdev_set_channel(struct ath10k *ar,
 int ath10k_wmi_pdev_suspend_target(struct ath10k *ar, u32 suspend_opt);
 int ath10k_wmi_pdev_resume_target(struct ath10k *ar);
 int ath10k_wmi_pdev_set_regdomain(struct ath10k *ar, u16 rd, u16 rd2g,
-                                 u16 rd5g, u16 ctl2g, u16 ctl5g);
+                                 u16 rd5g, u16 ctl2g, u16 ctl5g,
+                                 enum wmi_dfs_region dfs_reg);
 int ath10k_wmi_pdev_set_param(struct ath10k *ar, u32 id, u32 value);
 int ath10k_wmi_cmd_init(struct ath10k *ar);
 int ath10k_wmi_start_scan(struct ath10k *ar, const struct wmi_start_scan_arg *);
index 1a2973b7acf2500f5c97c992315793d0b4b9a9a7..0fce1c76638e9c9ae103e01ac6752dced8b1670d 100644 (file)
@@ -3709,8 +3709,8 @@ ath5k_hw_txpower(struct ath5k_hw *ah, struct ieee80211_channel *channel,
                        AR5K_REG_MS(AR5K_TUNE_MAX_TXPOWER, AR5K_TPC_CHIRP),
                        AR5K_TPC);
        } else {
-               ath5k_hw_reg_write(ah, AR5K_PHY_TXPOWER_RATE_MAX |
-                       AR5K_TUNE_MAX_TXPOWER, AR5K_PHY_TXPOWER_RATE_MAX);
+               ath5k_hw_reg_write(ah, AR5K_TUNE_MAX_TXPOWER,
+                       AR5K_PHY_TXPOWER_RATE_MAX);
        }
 
        return 0;
index e39e5860a2e9347a2d44318e69b07410c4bd2dc5..9c125ff083f73de2c766bd5ccd6f3dc16aad7ae2 100644 (file)
@@ -1,11 +1,19 @@
 config ATH6KL
        tristate "Atheros mobile chipsets support"
+       depends on CFG80211
+        ---help---
+         This module adds core support for wireless adapters based on
+         Atheros AR6003 and AR6004 chipsets. You still need separate
+         bus drivers for USB and SDIO to be able to use real devices.
+
+         If you choose to build it as a module, it will be called
+         ath6kl_core. Please note that AR6002 and AR6001 are not
+         supported by this driver.
 
 config ATH6KL_SDIO
        tristate "Atheros ath6kl SDIO support"
        depends on ATH6KL
        depends on MMC
-       depends on CFG80211
        ---help---
          This module adds support for wireless adapters based on
          Atheros AR6003 and AR6004 chipsets running over SDIO. If you
@@ -17,25 +25,31 @@ config ATH6KL_USB
        tristate "Atheros ath6kl USB support"
        depends on ATH6KL
        depends on USB
-       depends on CFG80211
        ---help---
          This module adds support for wireless adapters based on
-         Atheros AR6004 chipset running over USB. This is still under
-         implementation and it isn't functional. If you choose to
-         build it as a module, it will be called ath6kl_usb.
+         Atheros AR6004 chipset and chipsets based on it running over
+         USB. If you choose to build it as a module, it will be
+         called ath6kl_usb.
 
 config ATH6KL_DEBUG
        bool "Atheros ath6kl debugging"
        depends on ATH6KL
        ---help---
-         Enables debug support
+         Enables ath6kl debug support, including debug messages
+         enabled with debug_mask module parameter and debugfs
+         interface.
+
+         If unsure, say Y to make it easier to debug problems.
 
 config ATH6KL_TRACING
        bool "Atheros ath6kl tracing support"
        depends on ATH6KL
        depends on EVENT_TRACING
        ---help---
-         Select this to ath6kl use tracing infrastructure.
+         Select this to ath6kl use tracing infrastructure which, for
+         example, can be enabled with help of trace-cmd. All debug
+         messages and commands are delivered to using individually
+         enablable trace points.
 
          If unsure, say Y to make it easier to debug problems.
 
@@ -47,3 +61,5 @@ config ATH6KL_REGDOMAIN
          Enabling this makes it possible to change the regdomain in
          the firmware. This can be only enabled if regulatory requirements
          are taken into account.
+
+         If unsure, say N.
index c2c6f460495859ae3517c4f20daa2c15caebdde0..09285084bcd3d0a17ab6c518b1d2ea4f616ffe26 100644 (file)
@@ -724,8 +724,9 @@ ath6kl_add_bss_if_needed(struct ath6kl_vif *vif,
                        ath6kl_dbg(ATH6KL_DBG_WLAN_CFG,
                                   "added bss %pM to cfg80211\n", bssid);
                kfree(ie);
-       } else
+       } else {
                ath6kl_dbg(ATH6KL_DBG_WLAN_CFG, "cfg80211 already has a bss\n");
+       }
 
        return bss;
 }
@@ -970,7 +971,6 @@ static int ath6kl_set_probed_ssids(struct ath6kl *ar,
                                          ssid_list[i].flag,
                                          ssid_list[i].ssid.ssid_len,
                                          ssid_list[i].ssid.ssid);
-
        }
 
        /* Make sure no old entries are left behind */
@@ -1897,7 +1897,6 @@ static int ath6kl_wow_usr(struct ath6kl *ar, struct ath6kl_vif *vif,
 
        /* Configure the patterns that we received from the user. */
        for (i = 0; i < wow->n_patterns; i++) {
-
                /*
                 * Convert given nl80211 specific mask value to equivalent
                 * driver specific mask value and send it to the chip along
@@ -2850,8 +2849,9 @@ static int ath6kl_start_ap(struct wiphy *wiphy, struct net_device *dev,
        if (p.prwise_crypto_type == 0) {
                p.prwise_crypto_type = NONE_CRYPT;
                ath6kl_set_cipher(vif, 0, true);
-       } else if (info->crypto.n_ciphers_pairwise == 1)
+       } else if (info->crypto.n_ciphers_pairwise == 1) {
                ath6kl_set_cipher(vif, info->crypto.ciphers_pairwise[0], true);
+       }
 
        switch (info->crypto.cipher_group) {
        case WLAN_CIPHER_SUITE_WEP40:
@@ -2897,7 +2897,6 @@ static int ath6kl_start_ap(struct wiphy *wiphy, struct net_device *dev,
        }
 
        if (info->inactivity_timeout) {
-
                inactivity_timeout = info->inactivity_timeout;
 
                if (ar->hw.flags & ATH6KL_HW_AP_INACTIVITY_MINS)
index 4b46adbe8c923b7dd410c919dc3cbc289541ea4f..b0b6520427600a05687e299318f78f873e3fd948 100644 (file)
@@ -45,9 +45,9 @@ module_param(testmode, uint, 0644);
 module_param(recovery_enable, uint, 0644);
 module_param(heart_beat_poll, uint, 0644);
 MODULE_PARM_DESC(recovery_enable, "Enable recovery from firmware error");
-MODULE_PARM_DESC(heart_beat_poll, "Enable fw error detection periodic"   \
-                "polling. This also specifies the polling interval in"  \
-                "msecs. Set reocvery_enable for this to be effective");
+MODULE_PARM_DESC(heart_beat_poll,
+                "Enable fw error detection periodic polling in msecs - Also set recovery_enable for this to be effective");
+
 
 void ath6kl_core_tx_complete(struct ath6kl *ar, struct sk_buff *skb)
 {
index dbfd17d0a5faa33fa390b150d8d39e03a42d3746..55c4064dd5067f26b8dfaf54689f85c9eb67a88e 100644 (file)
@@ -172,7 +172,6 @@ void ath6kl_dump_registers(struct ath6kl_device *dev,
                           struct ath6kl_irq_proc_registers *irq_proc_reg,
                           struct ath6kl_irq_enable_reg *irq_enable_reg)
 {
-
        ath6kl_dbg(ATH6KL_DBG_IRQ, ("<------- Register Table -------->\n"));
 
        if (irq_proc_reg != NULL) {
@@ -219,7 +218,6 @@ void ath6kl_dump_registers(struct ath6kl_device *dev,
                                   "GMBOX lookahead alias 1:   0x%x\n",
                                   irq_proc_reg->rx_gmbox_lkahd_alias[1]);
                }
-
        }
 
        if (irq_enable_reg != NULL) {
@@ -1396,7 +1394,6 @@ static ssize_t ath6kl_create_qos_write(struct file *file,
                                                const char __user *user_buf,
                                                size_t count, loff_t *ppos)
 {
-
        struct ath6kl *ar = file->private_data;
        struct ath6kl_vif *vif;
        char buf[200];
@@ -1575,7 +1572,6 @@ static ssize_t ath6kl_delete_qos_write(struct file *file,
                                const char __user *user_buf,
                                size_t count, loff_t *ppos)
 {
-
        struct ath6kl *ar = file->private_data;
        struct ath6kl_vif *vif;
        char buf[100];
index ca9ba005f2871f3e42bbc914bb5ca90f6e8b90e9..e194c10d9f0071725c1c5eba917347ae8209b0a0 100644 (file)
@@ -97,8 +97,8 @@ static inline void ath6kl_dump_registers(struct ath6kl_device *dev,
                struct ath6kl_irq_proc_registers *irq_proc_reg,
                struct ath6kl_irq_enable_reg *irq_en_reg)
 {
-
 }
+
 static inline void dump_cred_dist_stats(struct htc_target *target)
 {
 }
index fea7709b5dda59aa26fa0fbc5eb70caccea77918..18c070850a09b870900624a83ee5576551281cdb 100644 (file)
@@ -37,7 +37,6 @@ static int ath6kl_hif_cp_scat_dma_buf(struct hif_scatter_req *req,
        buf = req->virt_dma_buf;
 
        for (i = 0; i < req->scat_entries; i++) {
-
                if (from_dma)
                        memcpy(req->scat_list[i].buf, buf,
                               req->scat_list[i].len);
@@ -116,7 +115,6 @@ static void ath6kl_hif_dump_fw_crash(struct ath6kl *ar)
                            le32_to_cpu(regdump_val[i + 2]),
                            le32_to_cpu(regdump_val[i + 3]));
        }
-
 }
 
 static int ath6kl_hif_proc_dbg_intr(struct ath6kl_device *dev)
@@ -701,5 +699,4 @@ int ath6kl_hif_setup(struct ath6kl_device *dev)
 
 fail_setup:
        return status;
-
 }
index 61f6b21fb0aeeaa4e6ef20dfca3baddf6ad638cd..dc6bd8cd9b837d85155d494a0afd2b32b7775240 100644 (file)
@@ -197,9 +197,9 @@ struct hif_scatter_req {
        /* bounce buffer for upper layers to copy to/from */
        u8 *virt_dma_buf;
 
-       struct hif_scatter_item scat_list[1];
-
        u32 scat_q_depth;
+
+       struct hif_scatter_item scat_list[0];
 };
 
 struct ath6kl_irq_proc_registers {
index 65e5b719093d47943b3135e902cf68001eefd988..e481f14b98787e88354278d26e3c59615ff2851c 100644 (file)
@@ -112,9 +112,9 @@ static void ath6kl_credit_init(struct ath6kl_htc_credit_info *cred_info,
                if (cur_ep_dist->endpoint == ENDPOINT_0)
                        continue;
 
-               if (cur_ep_dist->svc_id == WMI_CONTROL_SVC)
+               if (cur_ep_dist->svc_id == WMI_CONTROL_SVC) {
                        cur_ep_dist->cred_norm = cur_ep_dist->cred_per_msg;
-               else {
+               } else {
                        /*
                         * For the remaining data endpoints, we assume that
                         * each cred_per_msg are the same. We use a simple
@@ -129,7 +129,6 @@ static void ath6kl_credit_init(struct ath6kl_htc_credit_info *cred_info,
                        count = (count * 3) >> 2;
                        count = max(count, cur_ep_dist->cred_per_msg);
                        cur_ep_dist->cred_norm = count;
-
                }
 
                ath6kl_dbg(ATH6KL_DBG_CREDIT,
@@ -549,7 +548,6 @@ static int htc_check_credits(struct htc_target *target,
                             enum htc_endpoint_id eid, unsigned int len,
                             int *req_cred)
 {
-
        *req_cred = (len > target->tgt_cred_sz) ?
                     DIV_ROUND_UP(len, target->tgt_cred_sz) : 1;
 
@@ -608,7 +606,6 @@ static void ath6kl_htc_tx_pkts_get(struct htc_target *target,
        unsigned int len;
 
        while (true) {
-
                flags = 0;
 
                if (list_empty(&endpoint->txq))
@@ -889,7 +886,6 @@ static void ath6kl_htc_tx_from_queue(struct htc_target *target,
                ac = target->dev->ar->ep2ac_map[endpoint->eid];
 
        while (true) {
-
                if (list_empty(&endpoint->txq))
                        break;
 
@@ -1190,7 +1186,6 @@ static void ath6kl_htc_mbox_flush_txep(struct htc_target *target,
                list_add_tail(&packet->list, &container);
                htc_tx_complete(endpoint, &container);
        }
-
 }
 
 static void ath6kl_htc_flush_txep_all(struct htc_target *target)
@@ -1394,7 +1389,6 @@ static int ath6kl_htc_rx_setup(struct htc_target *target,
 
        ep_cb = ep->ep_cb;
        for (j = 0; j < n_msg; j++) {
-
                /*
                 * Reset flag, any packets allocated using the
                 * rx_alloc() API cannot be recycled on
@@ -1424,9 +1418,9 @@ static int ath6kl_htc_rx_setup(struct htc_target *target,
                                }
                        }
 
-                       if (list_empty(&ep->rx_bufq))
+                       if (list_empty(&ep->rx_bufq)) {
                                packet = NULL;
-                       else {
+                       } else {
                                packet = list_first_entry(&ep->rx_bufq,
                                                struct htc_packet, list);
                                list_del(&packet->list);
@@ -1487,7 +1481,6 @@ static int ath6kl_htc_rx_alloc(struct htc_target *target,
        spin_lock_bh(&target->rx_lock);
 
        for (i = 0; i < msg; i++) {
-
                htc_hdr = (struct htc_frame_hdr *)&lk_ahds[i];
 
                if (htc_hdr->eid >= ENDPOINT_MAX) {
@@ -1708,7 +1701,6 @@ static int htc_parse_trailer(struct htc_target *target,
                lk_ahd = (struct htc_lookahead_report *) record_buf;
                if ((lk_ahd->pre_valid == ((~lk_ahd->post_valid) & 0xFF)) &&
                    next_lk_ahds) {
-
                        ath6kl_dbg(ATH6KL_DBG_HTC,
                                   "htc rx lk_ahd found pre_valid 0x%x post_valid 0x%x\n",
                                   lk_ahd->pre_valid, lk_ahd->post_valid);
@@ -1755,7 +1747,6 @@ static int htc_parse_trailer(struct htc_target *target,
        }
 
        return 0;
-
 }
 
 static int htc_proc_trailer(struct htc_target *target,
@@ -1776,7 +1767,6 @@ static int htc_proc_trailer(struct htc_target *target,
        status = 0;
 
        while (len > 0) {
-
                if (len < sizeof(struct htc_record_hdr)) {
                        status = -ENOMEM;
                        break;
@@ -2098,7 +2088,6 @@ static int ath6kl_htc_rx_fetch(struct htc_target *target,
                }
 
                if (!fetched_pkts) {
-
                        packet = list_first_entry(rx_pktq, struct htc_packet,
                                                   list);
 
@@ -2173,7 +2162,6 @@ int ath6kl_htc_rxmsg_pending_handler(struct htc_target *target,
        look_aheads[0] = msg_look_ahead;
 
        while (true) {
-
                /*
                 * First lookahead sets the expected endpoint IDs for all
                 * packets in a bundle.
@@ -2825,8 +2813,9 @@ static int ath6kl_htc_reset(struct htc_target *target)
                        packet->buf = packet->buf_start;
                        packet->endpoint = ENDPOINT_0;
                        list_add_tail(&packet->list, &target->free_ctrl_rxbuf);
-               } else
+               } else {
                        list_add_tail(&packet->list, &target->free_ctrl_txbuf);
+               }
        }
 
        return 0;
index 67aa924ed8b317f9d5240ad1560a820553790c24..756fe52a12c8ad5a3496de58bbfec01d0ad9820b 100644 (file)
@@ -137,7 +137,6 @@ static void get_htc_packet_credit_based(struct htc_target *target,
                        credits_required = 0;
 
                } else {
-
                        if (ep->cred_dist.credits < credits_required)
                                break;
 
@@ -169,7 +168,6 @@ static void get_htc_packet_credit_based(struct htc_target *target,
                /* queue this packet into the caller's queue */
                list_add_tail(&packet->list, queue);
        }
-
 }
 
 static void get_htc_packet(struct htc_target *target,
@@ -279,7 +277,6 @@ static int htc_issue_packets(struct htc_target *target,
                        list_add(&packet->list, pkt_queue);
                        break;
                }
-
        }
 
        if (status != 0) {
@@ -385,7 +382,6 @@ static enum htc_send_queue_result htc_try_send(struct htc_target *target,
                         */
                        list_for_each_entry_safe(packet, tmp_pkt,
                                                 txq, list) {
-
                                ath6kl_dbg(ATH6KL_DBG_HTC,
                                           "%s: Indicat overflowed TX pkts: %p\n",
                                           __func__, packet);
@@ -403,7 +399,6 @@ static enum htc_send_queue_result htc_try_send(struct htc_target *target,
                                        list_move_tail(&packet->list,
                                                       &send_queue);
                                }
-
                        }
 
                        if (list_empty(&send_queue)) {
@@ -454,7 +449,6 @@ static enum htc_send_queue_result htc_try_send(struct htc_target *target,
         * enough transmit resources.
         */
        while (true) {
-
                if (get_queue_depth(&ep->txq) == 0)
                        break;
 
@@ -495,8 +489,8 @@ static enum htc_send_queue_result htc_try_send(struct htc_target *target,
                }
 
                spin_lock_bh(&target->tx_lock);
-
        }
+
        /* done with this endpoint, we can clear the count */
        ep->tx_proc_cnt = 0;
        spin_unlock_bh(&target->tx_lock);
@@ -1106,7 +1100,6 @@ free_skb:
        dev_kfree_skb(skb);
 
        return status;
-
 }
 
 static void htc_flush_rx_queue(struct htc_target *target,
@@ -1258,7 +1251,6 @@ static int ath6kl_htc_pipe_conn_service(struct htc_target *target,
                tx_alloc = 0;
 
        } else {
-
                tx_alloc = htc_get_credit_alloc(target, conn_req->svc_id);
                if (tx_alloc == 0) {
                        status = -ENOMEM;
index 4f316bdcbab58da3911c6a2365da1989ba412744..d5ef211f261c2c19e6e8deeef985dc2b83130794 100644 (file)
@@ -1192,7 +1192,6 @@ static int ath6kl_upload_board_file(struct ath6kl *ar)
 
        if (board_ext_address &&
            ar->fw_board_len == (board_data_size + board_ext_data_size)) {
-
                /* write extended board data */
                ath6kl_dbg(ATH6KL_DBG_BOOT,
                           "writing extended board data to 0x%x (%d B)\n",
index 5839fc23bdc789d5013f1c89e48f0f65eff37efe..d56554674da47924477c02423ad08c50caef2918 100644 (file)
@@ -571,7 +571,6 @@ void ath6kl_scan_complete_evt(struct ath6kl_vif *vif, int status)
 
 static int ath6kl_commit_ch_switch(struct ath6kl_vif *vif, u16 channel)
 {
-
        struct ath6kl *ar = vif->ar;
 
        vif->profile.ch = cpu_to_le16(channel);
@@ -600,7 +599,6 @@ static int ath6kl_commit_ch_switch(struct ath6kl_vif *vif, u16 channel)
 
 static void ath6kl_check_ch_switch(struct ath6kl *ar, u16 channel)
 {
-
        struct ath6kl_vif *vif;
        int res = 0;
 
@@ -692,9 +690,9 @@ void ath6kl_tkip_micerr_event(struct ath6kl_vif *vif, u8 keyid, bool ismcast)
                cfg80211_michael_mic_failure(vif->ndev, sta->mac,
                                             NL80211_KEYTYPE_PAIRWISE, keyid,
                                             tsc, GFP_KERNEL);
-       } else
+       } else {
                ath6kl_cfg80211_tkip_micerr_event(vif, keyid, ismcast);
-
+       }
 }
 
 static void ath6kl_update_target_stats(struct ath6kl_vif *vif, u8 *ptr, u32 len)
@@ -1093,8 +1091,9 @@ static int ath6kl_open(struct net_device *dev)
        if (test_bit(CONNECTED, &vif->flags)) {
                netif_carrier_on(dev);
                netif_wake_queue(dev);
-       } else
+       } else {
                netif_carrier_off(dev);
+       }
 
        return 0;
 }
@@ -1146,7 +1145,6 @@ static int ath6kl_set_features(struct net_device *dev,
                        dev->features = features | NETIF_F_RXCSUM;
                        return err;
                }
-
        }
 
        return err;
index 7126bdd4236c2b1e78dd3a9d2c600dbadbb53f0e..339d89f14d32b1991c3d3d646f460b90f41411bb 100644 (file)
@@ -348,7 +348,7 @@ static int ath6kl_sdio_alloc_prep_scat_req(struct ath6kl_sdio *ar_sdio,
        int i, scat_req_sz, scat_list_sz, size;
        u8 *virt_buf;
 
-       scat_list_sz = (n_scat_entry - 1) * sizeof(struct hif_scatter_item);
+       scat_list_sz = n_scat_entry * sizeof(struct hif_scatter_item);
        scat_req_sz = sizeof(*s_req) + scat_list_sz;
 
        if (!virt_scat)
@@ -425,8 +425,9 @@ static int ath6kl_sdio_read_write_sync(struct ath6kl *ar, u32 addr, u8 *buf,
                        memcpy(tbuf, buf, len);
 
                bounced = true;
-       } else
+       } else {
                tbuf = buf;
+       }
 
        ret = ath6kl_sdio_io(ar_sdio->func, request, addr, tbuf, len);
        if ((request & HIF_READ) && bounced)
@@ -441,9 +442,9 @@ static int ath6kl_sdio_read_write_sync(struct ath6kl *ar, u32 addr, u8 *buf,
 static void __ath6kl_sdio_write_async(struct ath6kl_sdio *ar_sdio,
                                      struct bus_request *req)
 {
-       if (req->scat_req)
+       if (req->scat_req) {
                ath6kl_sdio_scat_rw(ar_sdio, req);
-       else {
+       } else {
                void *context;
                int status;
 
@@ -656,7 +657,6 @@ static void ath6kl_sdio_scatter_req_add(struct ath6kl *ar,
        list_add_tail(&s_req->list, &ar_sdio->scat_req);
 
        spin_unlock_bh(&ar_sdio->scat_lock);
-
 }
 
 /* scatter gather read write request */
@@ -674,9 +674,9 @@ static int ath6kl_sdio_async_rw_scatter(struct ath6kl *ar,
                   "hif-scatter: total len: %d scatter entries: %d\n",
                   scat_req->len, scat_req->scat_entries);
 
-       if (request & HIF_SYNCHRONOUS)
+       if (request & HIF_SYNCHRONOUS) {
                status = ath6kl_sdio_scat_rw(ar_sdio, scat_req->busrequest);
-       else {
+       } else {
                spin_lock_bh(&ar_sdio->wr_async_lock);
                list_add_tail(&scat_req->busrequest->list, &ar_sdio->wr_asyncq);
                spin_unlock_bh(&ar_sdio->wr_async_lock);
@@ -856,7 +856,6 @@ static int ath6kl_sdio_suspend(struct ath6kl *ar, struct cfg80211_wowlan *wow)
 
        if (ar->suspend_mode == WLAN_POWER_STATE_WOW ||
            (!ar->suspend_mode && wow)) {
-
                ret = ath6kl_set_sdio_pm_caps(ar);
                if (ret)
                        goto cut_pwr;
@@ -878,7 +877,6 @@ static int ath6kl_sdio_suspend(struct ath6kl *ar, struct cfg80211_wowlan *wow)
 
        if (ar->suspend_mode == WLAN_POWER_STATE_DEEP_SLEEP ||
            !ar->suspend_mode || try_deepsleep) {
-
                flags = sdio_get_host_pm_caps(func);
                if (!(flags & MMC_PM_KEEP_POWER))
                        goto cut_pwr;
@@ -1061,7 +1059,6 @@ static int ath6kl_sdio_bmi_credits(struct ath6kl *ar)
 
        timeout = jiffies + msecs_to_jiffies(BMI_COMMUNICATION_TIMEOUT);
        while (time_before(jiffies, timeout) && !ar->bmi.cmd_credits) {
-
                /*
                 * Hit the credit counter with a 4-byte access, the first byte
                 * read will hit the counter and cause a decrement, while the
index a580a629a0da6ba24b251dab8147e7de74c34f7c..d5eeeae7711b253c7dcddbf4214613cd5aff86f7 100644 (file)
@@ -289,7 +289,7 @@ struct host_interest {
        u32 hi_hp_rx_traffic_ratio;                    /* 0xd8 */
 
        /* test applications flags */
-       u32 hi_test_apps_related    ;                  /* 0xdc */
+       u32 hi_test_apps_related;                      /* 0xdc */
        /* location of test script */
        u32 hi_ota_testscript;                         /* 0xe0 */
        /* location of CAL data */
index ebb24045a8ae6cbcac844a239d11a05ee6f86e14..40432fe7a5d2cc112a4ce6d68bb8fd43a115e01f 100644 (file)
@@ -125,8 +125,9 @@ static bool ath6kl_process_uapsdq(struct ath6kl_sta *conn,
                *flags |= WMI_DATA_HDR_FLAGS_UAPSD;
                spin_unlock_bh(&conn->psq_lock);
                return false;
-       } else if (!conn->apsd_info)
+       } else if (!conn->apsd_info) {
                return false;
+       }
 
        if (test_bit(WMM_ENABLED, &vif->flags)) {
                ether_type = be16_to_cpu(datap->h_proto);
@@ -316,8 +317,9 @@ int ath6kl_control_tx(void *devt, struct sk_buff *skb,
                cookie = NULL;
                ath6kl_err("wmi ctrl ep full, dropping pkt : 0x%p, len:%d\n",
                           skb, skb->len);
-       } else
+       } else {
                cookie = ath6kl_alloc_cookie(ar);
+       }
 
        if (cookie == NULL) {
                spin_unlock_bh(&ar->lock);
@@ -359,7 +361,7 @@ int ath6kl_data_tx(struct sk_buff *skb, struct net_device *dev)
        struct ath6kl_vif *vif = netdev_priv(dev);
        u32 map_no = 0;
        u16 htc_tag = ATH6KL_DATA_PKT_TAG;
-       u8 ac = 99 ; /* initialize to unmapped ac */
+       u8 ac = 99; /* initialize to unmapped ac */
        bool chk_adhoc_ps_mapping = false;
        int ret;
        struct wmi_tx_meta_v2 meta_v2;
@@ -449,8 +451,9 @@ int ath6kl_data_tx(struct sk_buff *skb, struct net_device *dev)
                        if (ret)
                                goto fail_tx;
                }
-       } else
+       } else {
                goto fail_tx;
+       }
 
        spin_lock_bh(&ar->lock);
 
@@ -702,7 +705,6 @@ void ath6kl_tx_complete(struct htc_target *target,
 
        /* reap completed packets */
        while (!list_empty(packet_queue)) {
-
                packet = list_first_entry(packet_queue, struct htc_packet,
                                          list);
                list_del(&packet->list);
@@ -1089,8 +1091,9 @@ static void aggr_deque_frms(struct aggr_info_conn *agg_conn, u8 tid,
                        else
                                skb_queue_tail(&rxtid->q, node->skb);
                        node->skb = NULL;
-               } else
+               } else {
                        stats->num_hole++;
+               }
 
                rxtid->seq_next = ATH6KL_NEXT_SEQ_NO(rxtid->seq_next);
                idx = AGGR_WIN_IDX(rxtid->seq_next, rxtid->hold_q_sz);
@@ -1211,7 +1214,7 @@ static bool aggr_process_recv_frm(struct aggr_info_conn *agg_conn, u8 tid,
                return is_queued;
 
        spin_lock_bh(&rxtid->lock);
-       for (idx = 0 ; idx < rxtid->hold_q_sz; idx++) {
+       for (idx = 0; idx < rxtid->hold_q_sz; idx++) {
                if (rxtid->hold_q[idx].skb) {
                        /*
                         * There is a frame in the queue and no
@@ -1265,7 +1268,6 @@ static void ath6kl_uapsd_trigger_frame_rx(struct ath6kl_vif *vif,
        is_apsdq_empty_at_start = is_apsdq_empty;
 
        while ((!is_apsdq_empty) && (num_frames_to_deliver)) {
-
                spin_lock_bh(&conn->psq_lock);
                skb = skb_dequeue(&conn->apsdq);
                is_apsdq_empty = skb_queue_empty(&conn->apsdq);
@@ -1606,16 +1608,18 @@ void ath6kl_rx(struct htc_target *target, struct htc_packet *packet)
                        if (!conn)
                                return;
                        aggr_conn = conn->aggr_conn;
-               } else
+               } else {
                        aggr_conn = vif->aggr_cntxt->aggr_conn;
+               }
 
                if (aggr_process_recv_frm(aggr_conn, tid, seq_no,
                                          is_amsdu, skb)) {
                        /* aggregation code will handle the skb */
                        return;
                }
-       } else if (!is_broadcast_ether_addr(datap->h_dest))
+       } else if (!is_broadcast_ether_addr(datap->h_dest)) {
                vif->net_stats.multicast++;
+       }
 
        ath6kl_deliver_frames_to_nw_stack(vif->ndev, skb);
 }
@@ -1710,8 +1714,9 @@ void aggr_recv_addba_req_evt(struct ath6kl_vif *vif, u8 tid_mux, u16 seq_no,
                sta = ath6kl_find_sta_by_aid(vif->ar, aid);
                if (sta)
                        aggr_conn = sta->aggr_conn;
-       } else
+       } else {
                aggr_conn = vif->aggr_cntxt->aggr_conn;
+       }
 
        if (!aggr_conn)
                return;
@@ -1766,7 +1771,6 @@ void aggr_conn_init(struct ath6kl_vif *vif, struct aggr_info *aggr_info,
                skb_queue_head_init(&rxtid->q);
                spin_lock_init(&rxtid->lock);
        }
-
 }
 
 struct aggr_info *aggr_init(struct ath6kl_vif *vif)
@@ -1806,8 +1810,9 @@ void aggr_recv_delba_req_evt(struct ath6kl_vif *vif, u8 tid_mux)
                sta = ath6kl_find_sta_by_aid(vif->ar, aid);
                if (sta)
                        aggr_conn = sta->aggr_conn;
-       } else
+       } else {
                aggr_conn = vif->aggr_cntxt->aggr_conn;
+       }
 
        if (!aggr_conn)
                return;
index 56c3fd5cef65a07e915d63d8c0dc24727a87406b..3afc5a463d06f822f339250deecfb1cefadea592 100644 (file)
@@ -236,7 +236,6 @@ static void ath6kl_usb_free_pipe_resources(struct ath6kl_usb_pipe *pipe)
                        break;
                kfree(urb_context);
        }
-
 }
 
 static void ath6kl_usb_cleanup_pipe_resources(struct ath6kl_usb *ar_usb)
@@ -245,7 +244,6 @@ static void ath6kl_usb_cleanup_pipe_resources(struct ath6kl_usb *ar_usb)
 
        for (i = 0; i < ATH6KL_USB_PIPE_MAX; i++)
                ath6kl_usb_free_pipe_resources(&ar_usb->pipes[i]);
-
 }
 
 static u8 ath6kl_usb_get_logical_pipe_num(struct ath6kl_usb *ar_usb,
index 8b4ce28e3ce8f51f7cda070364394a117d5aef66..0c0e1e36e40fe7359e7190f2abc2015ab79578bb 100644 (file)
@@ -289,8 +289,9 @@ int ath6kl_wmi_implicit_create_pstream(struct wmi *wmi, u8 if_idx,
                           ath6kl_wmi_determine_user_priority(((u8 *) llc_hdr) +
                                        sizeof(struct ath6kl_llc_snap_hdr),
                                        layer2_priority);
-               } else
+               } else {
                        usr_pri = layer2_priority & 0x7;
+               }
 
                /*
                 * Queue the EAPOL frames in the same WMM_AC_VO queue
@@ -359,8 +360,9 @@ int ath6kl_wmi_dot11_hdr_remove(struct wmi *wmi, struct sk_buff *skb)
                hdr_size = roundup(sizeof(struct ieee80211_qos_hdr),
                                   sizeof(u32));
                skb_pull(skb, hdr_size);
-       } else if (sub_type == cpu_to_le16(IEEE80211_STYPE_DATA))
+       } else if (sub_type == cpu_to_le16(IEEE80211_STYPE_DATA)) {
                skb_pull(skb, sizeof(struct ieee80211_hdr_3addr));
+       }
 
        datap = skb->data;
        llc_hdr = (struct ath6kl_llc_snap_hdr *)(datap);
@@ -936,7 +938,6 @@ ath6kl_regd_find_country_by_rd(u16 regdmn)
 
 static void ath6kl_wmi_regdomain_event(struct wmi *wmi, u8 *datap, int len)
 {
-
        struct ath6kl_wmi_regdomain *ev;
        struct country_code_to_enum_rd *country = NULL;
        struct reg_dmn_pair_mapping *regpair = NULL;
@@ -946,10 +947,9 @@ static void ath6kl_wmi_regdomain_event(struct wmi *wmi, u8 *datap, int len)
        ev = (struct ath6kl_wmi_regdomain *) datap;
        reg_code = le32_to_cpu(ev->reg_code);
 
-       if ((reg_code >> ATH6KL_COUNTRY_RD_SHIFT) & COUNTRY_ERD_FLAG)
+       if ((reg_code >> ATH6KL_COUNTRY_RD_SHIFT) & COUNTRY_ERD_FLAG) {
                country = ath6kl_regd_find_country((u16) reg_code);
-       else if (!(((u16) reg_code & WORLD_SKU_MASK) == WORLD_SKU_PREFIX)) {
-
+       } else if (!(((u16) reg_code & WORLD_SKU_MASK) == WORLD_SKU_PREFIX)) {
                regpair = ath6kl_get_regpair((u16) reg_code);
                country = ath6kl_regd_find_country_by_rd((u16) reg_code);
                if (regpair)
@@ -1499,7 +1499,6 @@ static int ath6kl_wmi_cac_event_rx(struct wmi *wmi, u8 *datap, int len,
 
        if ((reply->cac_indication == CAC_INDICATION_ADMISSION_RESP) &&
            (reply->status_code != IEEE80211_TSPEC_STATUS_ADMISS_ACCEPTED)) {
-
                ts = (struct ieee80211_tspec_ie *) &(reply->tspec_suggestion);
                tsinfo = le16_to_cpu(ts->tsinfo);
                tsid = (tsinfo >> IEEE80211_WMM_IE_TSPEC_TID_SHIFT) &
@@ -1530,7 +1529,6 @@ static int ath6kl_wmi_cac_event_rx(struct wmi *wmi, u8 *datap, int len,
         * for delete qos stream from AP
         */
        else if (reply->cac_indication == CAC_INDICATION_DELETE) {
-
                ts = (struct ieee80211_tspec_ie *) &(reply->tspec_suggestion);
                tsinfo = le16_to_cpu(ts->tsinfo);
                ts_id = ((tsinfo >> IEEE80211_WMM_IE_TSPEC_TID_SHIFT) &
@@ -2479,7 +2477,6 @@ static int ath6kl_wmi_sync_point(struct wmi *wmi, u8 if_idx)
                goto free_data_skb;
 
        for (index = 0; index < num_pri_streams; index++) {
-
                if (WARN_ON(!data_sync_bufs[index].skb))
                        goto free_data_skb;
 
@@ -2704,7 +2701,6 @@ static void ath6kl_wmi_relinquish_implicit_pstream_credits(struct wmi *wmi)
 
        for (i = 0; i < WMM_NUM_AC; i++) {
                if (stream_exist & (1 << i)) {
-
                        /*
                         * FIXME: Is this lock & unlock inside
                         * for loop correct? may need rework.
@@ -2870,8 +2866,9 @@ int ath6kl_wmi_set_host_sleep_mode_cmd(struct wmi *wmi, u8 if_idx,
        if (host_mode == ATH6KL_HOST_MODE_ASLEEP) {
                ath6kl_wmi_relinquish_implicit_pstream_credits(wmi);
                cmd->asleep = cpu_to_le32(1);
-       } else
+       } else {
                cmd->awake = cpu_to_le32(1);
+       }
 
        ret = ath6kl_wmi_cmd_send(wmi, if_idx, skb,
                                  WMI_SET_HOST_SLEEP_MODE_CMDID,
index b5f226503baf7e37ea15c7953878f7a05185375f..1f05ecd97c912b1efdfdea3910abd3aca71be38a 100644 (file)
@@ -898,7 +898,6 @@ struct wmi_start_scan_cmd {
  *  flags here
  */
 enum wmi_scan_ctrl_flags_bits {
-
        /* set if can scan in the connect cmd */
        CONNECT_SCAN_CTRL_FLAGS = 0x01,
 
index a0398fe3eb284f94e650f4541e18bfcd6713e5ef..be3eb2a8d602ee9096cf2f7b798d4ea5807dba82 100644 (file)
@@ -86,7 +86,6 @@ static int ath_ahb_probe(struct platform_device *pdev)
        int irq;
        int ret = 0;
        struct ath_hw *ah;
-       struct ath_common *common;
        char hw_name[64];
 
        if (!dev_get_platdata(&pdev->dev)) {
@@ -146,9 +145,6 @@ static int ath_ahb_probe(struct platform_device *pdev)
        wiphy_info(hw->wiphy, "%s mem=0x%lx, irq=%d\n",
                   hw_name, (unsigned long)mem, irq);
 
-       common = ath9k_hw_common(sc->sc_ah);
-       /* Will be cleared in ath9k_start() */
-       set_bit(ATH_OP_INVALID, &common->op_flags);
        return 0;
 
  err_irq:
index 6d47783f2e5b7ecfd4343c29c8ad4bcbff7252aa..ba502a2d199bc2c85e00718d7f1cda1f71e90e43 100644 (file)
@@ -155,6 +155,9 @@ static void ath9k_hw_set_ofdm_nil(struct ath_hw *ah, u8 immunityLevel,
                ATH9K_ANI_RSSI_THR_LOW,
                ATH9K_ANI_RSSI_THR_HIGH);
 
+       if (AR_SREV_9100(ah) && immunityLevel < ATH9K_ANI_OFDM_DEF_LEVEL)
+               immunityLevel = ATH9K_ANI_OFDM_DEF_LEVEL;
+
        if (!scan)
                aniState->ofdmNoiseImmunityLevel = immunityLevel;
 
@@ -235,6 +238,9 @@ static void ath9k_hw_set_cck_nil(struct ath_hw *ah, u_int8_t immunityLevel,
                BEACON_RSSI(ah), ATH9K_ANI_RSSI_THR_LOW,
                ATH9K_ANI_RSSI_THR_HIGH);
 
+       if (AR_SREV_9100(ah) && immunityLevel < ATH9K_ANI_CCK_DEF_LEVEL)
+               immunityLevel = ATH9K_ANI_CCK_DEF_LEVEL;
+
        if (ah->opmode == NL80211_IFTYPE_STATION &&
            BEACON_RSSI(ah) <= ATH9K_ANI_RSSI_THR_LOW &&
            immunityLevel > ATH9K_ANI_CCK_MAX_LEVEL_LOW_RSSI)
index 0a6163e9248c0fdefa0f0e78a695201fb8a7d0f2..c38399bc9aa96e84fce4929319599e1d4db694c7 100644 (file)
@@ -410,7 +410,7 @@ static const u32 ar9300_2p2_baseband_core[][2] = {
        {0x00009e30, 0x06336f77},
        {0x00009e34, 0x6af6532f},
        {0x00009e38, 0x0cc80c00},
-       {0x00009e40, 0x0d261820},
+       {0x00009e40, 0x0d261800},
        {0x00009e4c, 0x00001004},
        {0x00009e50, 0x00ff03f1},
        {0x00009e54, 0x00000000},
index f76139bbb74f0fcf144e22dd382be140c48cf08b..2c42ff05efa38f507cdd0f54905d8b79c0c77d32 100644 (file)
@@ -592,7 +592,7 @@ static const u32 ar9331_1p1_baseband_core[][2] = {
        {0x00009e30, 0x06336f77},
        {0x00009e34, 0x6af6532f},
        {0x00009e38, 0x0cc80c00},
-       {0x00009e40, 0x0d261820},
+       {0x00009e40, 0x0d261800},
        {0x00009e4c, 0x00001004},
        {0x00009e50, 0x00ff03f1},
        {0x00009fc0, 0x803e4788},
index 0ac8be96097f2e70f436f75aadb4be1b2d2bc44b..2154efcd3900514af944619a174db08d2514a140 100644 (file)
@@ -231,7 +231,7 @@ static const u32 ar9331_1p2_baseband_core[][2] = {
        {0x00009e30, 0x06336f77},
        {0x00009e34, 0x6af6532f},
        {0x00009e38, 0x0cc80c00},
-       {0x00009e40, 0x0d261820},
+       {0x00009e40, 0x0d261800},
        {0x00009e4c, 0x00001004},
        {0x00009e50, 0x00ff03f1},
        {0x00009fc0, 0x803e4788},
index a01f0edb65182a16b95181038786c9d8073a1cab..b995ffe88b33bb8af6ca6a7aaa47e23adee34857 100644 (file)
@@ -318,7 +318,7 @@ static const u32 ar9340_1p0_baseband_core[][2] = {
        {0x00009e30, 0x06336f77},
        {0x00009e34, 0x6af6532f},
        {0x00009e38, 0x0cc80c00},
-       {0x00009e40, 0x0d261820},
+       {0x00009e40, 0x0d261800},
        {0x00009e4c, 0x00001004},
        {0x00009e50, 0x00ff03f1},
        {0x00009e54, 0x00000000},
@@ -348,9 +348,9 @@ static const u32 ar9340_1p0_baseband_core[][2] = {
        {0x0000a370, 0x00000000},
        {0x0000a390, 0x00000001},
        {0x0000a394, 0x00000444},
-       {0x0000a398, 0x00000000},
-       {0x0000a39c, 0x210d0401},
-       {0x0000a3a0, 0xab9a7144},
+       {0x0000a398, 0x001f0e0f},
+       {0x0000a39c, 0x0075393f},
+       {0x0000a3a0, 0xb79f6427},
        {0x0000a3a4, 0x00000000},
        {0x0000a3a8, 0xaaaaaaaa},
        {0x0000a3ac, 0x3c466478},
index 3c9113d9b1bc3cfe3aa7e9e07be8efa1a2df4d14..8e5c3b9786e3ac3fab8cf42e0d80bf84f268d4d2 100644 (file)
@@ -257,9 +257,9 @@ static const u32 qca953x_1p0_baseband_core[][2] = {
        {0x0000a370, 0x00000000},
        {0x0000a390, 0x00000001},
        {0x0000a394, 0x00000444},
-       {0x0000a398, 0x1f020503},
-       {0x0000a39c, 0x29180c03},
-       {0x0000a3a0, 0x9a8b6844},
+       {0x0000a398, 0x001f0e0f},
+       {0x0000a39c, 0x0075393f},
+       {0x0000a3a0, 0xb79f6427},
        {0x0000a3a4, 0x000000ff},
        {0x0000a3a8, 0x6a6a6a6a},
        {0x0000a3ac, 0x6a6a6a6a},
index e6aec2c0207ff43754a79a236a769a33bacf401e..a5ca65240af30b8980b5732a610d73ef155063c9 100644 (file)
@@ -90,7 +90,7 @@ static const u32 ar9580_1p0_baseband_core[][2] = {
        {0x00009e30, 0x06336f77},
        {0x00009e34, 0x6af6532f},
        {0x00009e38, 0x0cc80c00},
-       {0x00009e40, 0x0d261820},
+       {0x00009e40, 0x0d261800},
        {0x00009e4c, 0x00001004},
        {0x00009e50, 0x00ff03f1},
        {0x00009e54, 0x00000000},
index 44d74495c4de1465dbf42cba08b6bff03090023a..33a2ae77b59514b66269cfbc7c42512611d7c8a6 100644 (file)
@@ -114,6 +114,9 @@ int ath_descdma_setup(struct ath_softc *sc, struct ath_descdma *dd,
 #define ATH_TXFIFO_DEPTH           8
 #define ATH_TX_ERROR               0x01
 
+/* Stop tx traffic 1ms before the GO goes away */
+#define ATH_P2P_PS_STOP_TIME       1000
+
 #define IEEE80211_SEQ_SEQ_SHIFT    4
 #define IEEE80211_SEQ_MAX          4096
 #define IEEE80211_WEP_IVLEN        3
@@ -251,7 +254,6 @@ struct ath_atx_tid {
 
        s8 bar_index;
        bool sched;
-       bool paused;
        bool active;
 };
 
@@ -367,11 +369,15 @@ void ath9k_release_buffered_frames(struct ieee80211_hw *hw,
 /********/
 
 struct ath_vif {
+       struct ieee80211_vif *vif;
        struct ath_node mcast_node;
        int av_bslot;
        bool primary_sta_vif;
        __le64 tsf_adjust; /* TSF adjustment for staggered beacons */
        struct ath_buf *av_bcbuf;
+
+       /* P2P Client */
+       struct ieee80211_noa_data noa;
 };
 
 struct ath9k_vif_iter_data {
@@ -464,6 +470,8 @@ int ath_update_survey_stats(struct ath_softc *sc);
 void ath_update_survey_nf(struct ath_softc *sc, int channel);
 void ath9k_queue_reset(struct ath_softc *sc, enum ath_reset_type type);
 void ath_ps_full_sleep(unsigned long data);
+void ath9k_p2p_ps_timer(void *priv);
+void ath9k_update_p2p_ps(struct ath_softc *sc, struct ieee80211_vif *vif);
 
 /**********/
 /* BTCOEX */
@@ -714,6 +722,9 @@ struct ath_softc {
        struct completion paprd_complete;
        wait_queue_head_t tx_wait;
 
+       struct ath_gen_timer *p2p_ps_timer;
+       struct ath_vif *p2p_ps_vif;
+
        unsigned long driver_data;
 
        u8 gtt_cnt;
index d76e6e0120d2c28236163f56de4471c46f397c60..ffca918ff16aff4be941d572ac19d4f152e5b2c2 100644 (file)
@@ -72,7 +72,7 @@ static ssize_t read_file_node_aggr(struct file *file, char __user *user_buf,
                ath_txq_lock(sc, txq);
                if (tid->active) {
                        len += scnprintf(buf + len, size - len,
-                                        "%3d%11d%10d%10d%10d%10d%9d%6d%8d\n",
+                                        "%3d%11d%10d%10d%10d%10d%9d%6d\n",
                                         tid->tidno,
                                         tid->seq_start,
                                         tid->seq_next,
@@ -80,8 +80,7 @@ static ssize_t read_file_node_aggr(struct file *file, char __user *user_buf,
                                         tid->baw_head,
                                         tid->baw_tail,
                                         tid->bar_index,
-                                        tid->sched,
-                                        tid->paused);
+                                        tid->sched);
                }
                ath_txq_unlock(sc, txq);
        }
index cbbb02a6b13b463c9bfdf8b1bad21f45cde0b4cd..4243509616bd75e58e2d1af4e8ec375ccd021281 100644 (file)
@@ -589,6 +589,9 @@ static int ath9k_init_softc(u16 devid, struct ath_softc *sc,
        if (ret)
                goto err_btcoex;
 
+       sc->p2p_ps_timer = ath_gen_timer_alloc(sc->sc_ah, ath9k_p2p_ps_timer,
+               NULL, sc, AR_FIRST_NDP_TIMER);
+
        ath9k_cmn_init_crypto(sc->sc_ah);
        ath9k_init_misc(sc);
        ath_fill_led_pin(sc);
@@ -644,13 +647,13 @@ static void ath9k_init_txpower_limits(struct ath_softc *sc)
 
 static const struct ieee80211_iface_limit if_limits[] = {
        { .max = 2048,  .types = BIT(NL80211_IFTYPE_STATION) |
-                                BIT(NL80211_IFTYPE_P2P_CLIENT) |
                                 BIT(NL80211_IFTYPE_WDS) },
        { .max = 8,     .types =
 #ifdef CONFIG_MAC80211_MESH
                                 BIT(NL80211_IFTYPE_MESH_POINT) |
 #endif
-                                BIT(NL80211_IFTYPE_AP) |
+                                BIT(NL80211_IFTYPE_AP) },
+       { .max = 1,     .types = BIT(NL80211_IFTYPE_P2P_CLIENT) |
                                 BIT(NL80211_IFTYPE_P2P_GO) },
 };
 
@@ -783,6 +786,9 @@ int ath9k_init_device(u16 devid, struct ath_softc *sc,
        common = ath9k_hw_common(ah);
        ath9k_set_hw_capab(sc, hw);
 
+       /* Will be cleared in ath9k_start() */
+       set_bit(ATH_OP_INVALID, &common->op_flags);
+
        /* Initialize regulatory */
        error = ath_regd_init(&common->regulatory, sc->hw->wiphy,
                              ath9k_reg_notifier);
@@ -852,6 +858,9 @@ static void ath9k_deinit_softc(struct ath_softc *sc)
 {
        int i = 0;
 
+       if (sc->p2p_ps_timer)
+               ath_gen_timer_free(sc->sc_ah, sc->p2p_ps_timer);
+
        ath9k_deinit_btcoex(sc);
 
        for (i = 0; i < ATH9K_NUM_TX_QUEUES; i++)
index d69853b848ce1f10167275c4e85d6026ee41c5f1..8d7b9b66fefa592e5cee77b55e84c49b14ea2de0 100644 (file)
@@ -261,6 +261,8 @@ static bool ath_complete_reset(struct ath_softc *sc, bool start)
        sc->gtt_cnt = 0;
        ieee80211_wake_queues(sc->hw);
 
+       ath9k_p2p_ps_timer(sc);
+
        return true;
 }
 
@@ -1119,6 +1121,8 @@ static int ath9k_add_interface(struct ieee80211_hw *hw,
        if (ath9k_uses_beacons(vif->type))
                ath9k_beacon_assign_slot(sc, vif);
 
+       avp->vif = vif;
+
        an->sc = sc;
        an->sta = NULL;
        an->vif = vif;
@@ -1163,6 +1167,29 @@ static int ath9k_change_interface(struct ieee80211_hw *hw,
        return 0;
 }
 
+static void
+ath9k_update_p2p_ps_timer(struct ath_softc *sc, struct ath_vif *avp)
+{
+       struct ath_hw *ah = sc->sc_ah;
+       s32 tsf, target_tsf;
+
+       if (!avp || !avp->noa.has_next_tsf)
+               return;
+
+       ath9k_hw_gen_timer_stop(ah, sc->p2p_ps_timer);
+
+       tsf = ath9k_hw_gettsf32(sc->sc_ah);
+
+       target_tsf = avp->noa.next_tsf;
+       if (!avp->noa.absent)
+               target_tsf -= ATH_P2P_PS_STOP_TIME;
+
+       if (target_tsf - tsf < ATH_P2P_PS_STOP_TIME)
+               target_tsf = tsf + ATH_P2P_PS_STOP_TIME;
+
+       ath9k_hw_gen_timer_start(ah, sc->p2p_ps_timer, (u32) target_tsf, 1000000);
+}
+
 static void ath9k_remove_interface(struct ieee80211_hw *hw,
                                   struct ieee80211_vif *vif)
 {
@@ -1174,6 +1201,13 @@ static void ath9k_remove_interface(struct ieee80211_hw *hw,
 
        mutex_lock(&sc->mutex);
 
+       spin_lock_bh(&sc->sc_pcu_lock);
+       if (avp == sc->p2p_ps_vif) {
+               sc->p2p_ps_vif = NULL;
+               ath9k_update_p2p_ps_timer(sc, NULL);
+       }
+       spin_unlock_bh(&sc->sc_pcu_lock);
+
        sc->nvifs--;
        sc->tx99_vif = NULL;
 
@@ -1636,6 +1670,72 @@ static void ath9k_bss_assoc_iter(void *data, u8 *mac, struct ieee80211_vif *vif)
                ath9k_set_assoc_state(sc, vif);
 }
 
+void ath9k_p2p_ps_timer(void *priv)
+{
+       struct ath_softc *sc = priv;
+       struct ath_vif *avp = sc->p2p_ps_vif;
+       struct ieee80211_vif *vif;
+       struct ieee80211_sta *sta;
+       struct ath_node *an;
+       u32 tsf;
+
+       if (!avp)
+               return;
+
+       tsf = ath9k_hw_gettsf32(sc->sc_ah);
+       if (!avp->noa.absent)
+               tsf += ATH_P2P_PS_STOP_TIME;
+
+       if (!avp->noa.has_next_tsf ||
+           avp->noa.next_tsf - tsf > BIT(31))
+               ieee80211_update_p2p_noa(&avp->noa, tsf);
+
+       ath9k_update_p2p_ps_timer(sc, avp);
+
+       rcu_read_lock();
+
+       vif = avp->vif;
+       sta = ieee80211_find_sta(vif, vif->bss_conf.bssid);
+       if (!sta)
+               goto out;
+
+       an = (void *) sta->drv_priv;
+       if (an->sleeping == !!avp->noa.absent)
+               goto out;
+
+       an->sleeping = avp->noa.absent;
+       if (an->sleeping)
+               ath_tx_aggr_sleep(sta, sc, an);
+       else
+               ath_tx_aggr_wakeup(sc, an);
+
+out:
+       rcu_read_unlock();
+}
+
+void ath9k_update_p2p_ps(struct ath_softc *sc, struct ieee80211_vif *vif)
+{
+       struct ath_vif *avp = (void *)vif->drv_priv;
+       unsigned long flags;
+       u32 tsf;
+
+       if (!sc->p2p_ps_timer)
+               return;
+
+       if (vif->type != NL80211_IFTYPE_STATION || !vif->p2p)
+               return;
+
+       sc->p2p_ps_vif = avp;
+
+       spin_lock_irqsave(&sc->sc_pm_lock, flags);
+       if (!(sc->ps_flags & PS_BEACON_SYNC)) {
+               tsf = ath9k_hw_gettsf32(sc->sc_ah);
+               ieee80211_parse_p2p_noa(&vif->bss_conf.p2p_noa_attr, &avp->noa, tsf);
+               ath9k_update_p2p_ps_timer(sc, avp);
+       }
+       spin_unlock_irqrestore(&sc->sc_pm_lock, flags);
+}
+
 static void ath9k_bss_info_changed(struct ieee80211_hw *hw,
                                   struct ieee80211_vif *vif,
                                   struct ieee80211_bss_conf *bss_conf,
@@ -1710,6 +1810,12 @@ static void ath9k_bss_info_changed(struct ieee80211_hw *hw,
                }
        }
 
+       if (changed & BSS_CHANGED_P2P_PS) {
+               spin_lock_bh(&sc->sc_pcu_lock);
+               ath9k_update_p2p_ps(sc, vif);
+               spin_unlock_bh(&sc->sc_pcu_lock);
+       }
+
        if (changed & CHECK_ANI)
                ath_check_ani(sc);
 
@@ -1883,7 +1989,8 @@ static bool ath9k_has_tx_pending(struct ath_softc *sc)
        return !!npend;
 }
 
-static void ath9k_flush(struct ieee80211_hw *hw, u32 queues, bool drop)
+static void ath9k_flush(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
+                       u32 queues, bool drop)
 {
        struct ath_softc *sc = hw->priv;
        struct ath_hw *ah = sc->sc_ah;
index 25304adece571d9d4e498df9398103a49ad08a7e..914dbc6b17208df991e92d3c3539508ea31f57ad 100644 (file)
@@ -784,7 +784,6 @@ static int ath_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id)
 {
        struct ath_softc *sc;
        struct ieee80211_hw *hw;
-       struct ath_common *common;
        u8 csz;
        u32 val;
        int ret = 0;
@@ -877,10 +876,6 @@ static int ath_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id)
        wiphy_info(hw->wiphy, "%s mem=0x%lx, irq=%d\n",
                   hw_name, (unsigned long)sc->mem, pdev->irq);
 
-       /* Will be cleared in ath9k_start() */
-       common = ath9k_hw_common(sc->sc_ah);
-       set_bit(ATH_OP_INVALID, &common->op_flags);
-
        return 0;
 
 err_init:
index 6c9accdb52e4140076d7378f530e975c34f68433..441c71448e4ce7ff77c36ac943edf16d383b1ec0 100644 (file)
@@ -539,6 +539,9 @@ static void ath_rx_ps_beacon(struct ath_softc *sc, struct sk_buff *skb)
                ath_dbg(common, PS,
                        "Reconfigure beacon timers based on synchronized timestamp\n");
                ath9k_set_beacon(sc);
+
+               if (sc->p2p_ps_vif)
+                       ath9k_update_p2p_ps(sc, sc->p2p_ps_vif->vif);
        }
 
        if (ath_beacon_dtim_pending_cab(skb)) {
@@ -975,6 +978,7 @@ int ath_rx_tasklet(struct ath_softc *sc, int flush, bool hp)
        u64 tsf = 0;
        unsigned long flags;
        dma_addr_t new_buf_addr;
+       unsigned int budget = 512;
 
        if (edma)
                dma_type = DMA_BIDIRECTIONAL;
@@ -1113,15 +1117,17 @@ requeue_drop_frag:
                }
 requeue:
                list_add_tail(&bf->list, &sc->rx.rxbuf);
-               if (flush)
-                       continue;
 
                if (edma) {
                        ath_rx_edma_buf_link(sc, qtype);
                } else {
                        ath_rx_buf_relink(sc, bf);
-                       ath9k_hw_rxena(ah);
+                       if (!flush)
+                               ath9k_hw_rxena(ah);
                }
+
+               if (!budget--)
+                       break;
        } while (1);
 
        if (!(ah->imask & ATH9K_INT_RXEOL)) {
index 87cbec47fb48371403daaa70b32c1b9bc40ce1ec..66acb2cbd9df3cc45c307bb6b38118436da3d01a 100644 (file)
@@ -107,9 +107,6 @@ static void ath_tx_queue_tid(struct ath_txq *txq, struct ath_atx_tid *tid)
 {
        struct ath_atx_ac *ac = tid->ac;
 
-       if (tid->paused)
-               return;
-
        if (tid->sched)
                return;
 
@@ -1407,7 +1404,6 @@ int ath_tx_aggr_start(struct ath_softc *sc, struct ieee80211_sta *sta,
        ath_tx_tid_change_state(sc, txtid);
 
        txtid->active = true;
-       txtid->paused = true;
        *ssn = txtid->seq_start = txtid->seq_next;
        txtid->bar_index = -1;
 
@@ -1427,7 +1423,6 @@ void ath_tx_aggr_stop(struct ath_softc *sc, struct ieee80211_sta *sta, u16 tid)
 
        ath_txq_lock(sc, txq);
        txtid->active = false;
-       txtid->paused = false;
        ath_tx_flush_tid(sc, txtid);
        ath_tx_tid_change_state(sc, txtid);
        ath_txq_unlock_complete(sc, txq);
@@ -1487,7 +1482,7 @@ void ath_tx_aggr_wakeup(struct ath_softc *sc, struct ath_node *an)
                ath_txq_lock(sc, txq);
                ac->clear_ps_filter = true;
 
-               if (!tid->paused && ath_tid_has_buffered(tid)) {
+               if (ath_tid_has_buffered(tid)) {
                        ath_tx_queue_tid(txq, tid);
                        ath_txq_schedule(sc, txq);
                }
@@ -1510,7 +1505,6 @@ void ath_tx_aggr_resume(struct ath_softc *sc, struct ieee80211_sta *sta,
        ath_txq_lock(sc, txq);
 
        tid->baw_size = IEEE80211_MIN_AMPDU_BUF << sta->ht_cap.ampdu_factor;
-       tid->paused = false;
 
        if (ath_tid_has_buffered(tid)) {
                ath_tx_queue_tid(txq, tid);
@@ -1544,8 +1538,6 @@ void ath9k_release_buffered_frames(struct ieee80211_hw *hw,
                        continue;
 
                tid = ATH_AN_2_TID(an, i);
-               if (tid->paused)
-                       continue;
 
                ath_txq_lock(sc, tid->ac->txq);
                while (nframes > 0) {
@@ -1844,9 +1836,6 @@ void ath_txq_schedule(struct ath_softc *sc, struct ath_txq *txq)
                        list_del(&tid->list);
                        tid->sched = false;
 
-                       if (tid->paused)
-                               continue;
-
                        if (ath_tx_sched_aggr(sc, txq, tid, &stop))
                                sent = true;
 
@@ -2698,7 +2687,6 @@ void ath_tx_node_init(struct ath_softc *sc, struct ath_node *an)
                tid->baw_size  = WME_MAX_BA;
                tid->baw_head  = tid->baw_tail = 0;
                tid->sched     = false;
-               tid->paused    = false;
                tid->active        = false;
                __skb_queue_head_init(&tid->buf_q);
                __skb_queue_head_init(&tid->retry_q);
index 4c8cdb097b6599c08816a4e184537d764bc9ef43..f8ded84b7be8c5e3b6038910a8829364a5e5ba1e 100644 (file)
@@ -1707,7 +1707,9 @@ found:
        return 0;
 }
 
-static void carl9170_op_flush(struct ieee80211_hw *hw, u32 queues, bool drop)
+static void carl9170_op_flush(struct ieee80211_hw *hw,
+                             struct ieee80211_vif *vif,
+                             u32 queues, bool drop)
 {
        struct ar9170 *ar = hw->priv;
        unsigned int vid;
index 5824cd41e4bac6d387087ab84739df3225b8f799..73593aa3cd9813e2bd6849b969427f83f1a9a578 100644 (file)
@@ -338,7 +338,7 @@ static irqreturn_t wil6210_irq_misc_thread(int irq, void *cookie)
        }
 
        if (isr)
-               wil_err(wil, "un-handled MISC ISR bits 0x%08x\n", isr);
+               wil_dbg_irq(wil, "un-handled MISC ISR bits 0x%08x\n", isr);
 
        wil->isr_misc = 0;
 
index 95f4efe9ef37c652722a5ca381af4529a6ea65e9..1b265fd19de26060329ffb3c7b2818feeb2fcc68 100644 (file)
@@ -363,8 +363,8 @@ static int wil_wait_for_fw_ready(struct wil6210_priv *wil)
                wil_err(wil, "Firmware not ready\n");
                return -ETIME;
        } else {
-               wil_dbg_misc(wil, "FW ready after %d ms\n",
-                            jiffies_to_msecs(to-left));
+               wil_info(wil, "FW ready after %d ms. HW version 0x%08x\n",
+                        jiffies_to_msecs(to-left), wil->hw_version);
        }
        return 0;
 }
index f1e1bb338d681e71c96b130363bf04387c3ffa85..0660884183070d7d46876e0a07effb2c1650e005 100644 (file)
@@ -74,8 +74,6 @@ static int wil_if_pcie_enable(struct wil6210_priv *wil)
        if (rc)
                goto release_irq;
 
-       wil_info(wil, "HW version: 0x%08x\n", wil->hw_version);
-
        return 0;
 
  release_irq:
index d04629fe053f5e2f864bc1342ebdadf144d17008..ec29954bd44dd1d8de1cf46ce736cc4f8157127e 100644 (file)
@@ -91,6 +91,22 @@ void wil_rx_reorder(struct wil6210_priv *wil, struct sk_buff *skb)
 
        spin_lock(&r->reorder_lock);
 
+       /** Due to the race between WMI events, where BACK establishment
+        * reported, and data Rx, few packets may be pass up before reorder
+        * buffer get allocated. Catch up by pretending SSN is what we
+        * see in the 1-st Rx packet
+        */
+       if (r->first_time) {
+               r->first_time = false;
+               if (seq != r->head_seq_num) {
+                       wil_err(wil, "Error: 1-st frame with wrong sequence"
+                               " %d, should be %d. Fixing...\n", seq,
+                               r->head_seq_num);
+                       r->head_seq_num = seq;
+                       r->ssn = seq;
+               }
+       }
+
        /* frame with out of date sequence number */
        if (seq_less(seq, r->head_seq_num)) {
                dev_kfree_skb(skb);
@@ -162,6 +178,7 @@ struct wil_tid_ampdu_rx *wil_tid_ampdu_rx_alloc(struct wil6210_priv *wil,
        r->head_seq_num = ssn;
        r->buf_size = size;
        r->stored_mpdu_num = 0;
+       r->first_time = true;
        return r;
 }
 
index 2a2dec75f02606c9ea71ecd95d846287520157d5..d3b8659ca32e17853602a8a21525f345abaf92a2 100644 (file)
@@ -35,7 +35,7 @@ static inline u32 WIL_GET_BITS(u32 x, int b0, int b1)
 #define WIL6210_MEM_SIZE (2*1024*1024UL)
 
 #define WIL6210_RX_RING_SIZE   (128)
-#define WIL6210_TX_RING_SIZE   (128)
+#define WIL6210_TX_RING_SIZE   (512)
 #define WIL6210_MAX_TX_RINGS   (24) /* HW limit */
 #define WIL6210_MAX_CID                (8) /* HW limit */
 #define WIL6210_NAPI_BUDGET    (16) /* arbitrary */
@@ -301,6 +301,7 @@ struct wil_tid_ampdu_rx {
        u16 buf_size;
        u16 timeout;
        u8 dialog_token;
+       bool first_time; /* is it 1-st time this buffer used? */
 };
 
 struct wil6210_stats {
index 2ba56eef0c457d4397c27fa84a1291b011d3884f..e9a11cb3428ad1c44d4f33b18aeab3198a937120 100644 (file)
@@ -192,7 +192,7 @@ static int __wmi_send(struct wil6210_priv *wil, u16 cmdid, void *buf, u16 len)
        might_sleep();
 
        if (!test_bit(wil_status_fwready, &wil->status)) {
-               wil_err(wil, "FW not ready\n");
+               wil_err(wil, "WMI: cannot send command while FW not ready\n");
                return -EAGAIN;
        }
 
@@ -276,8 +276,8 @@ static void wmi_evt_ready(struct wil6210_priv *wil, int id, void *d, int len)
        wil->fw_version = le32_to_cpu(evt->sw_version);
        wil->n_mids = evt->numof_additional_mids;
 
-       wil_dbg_wmi(wil, "FW ver. %d; MAC %pM; %d MID's\n", wil->fw_version,
-                   evt->mac, wil->n_mids);
+       wil_info(wil, "FW ver. %d; MAC %pM; %d MID's\n", wil->fw_version,
+                evt->mac, wil->n_mids);
 
        if (!is_valid_ether_addr(ndev->dev_addr)) {
                memcpy(ndev->dev_addr, evt->mac, ETH_ALEN);
@@ -290,7 +290,7 @@ static void wmi_evt_ready(struct wil6210_priv *wil, int id, void *d, int len)
 static void wmi_evt_fw_ready(struct wil6210_priv *wil, int id, void *d,
                             int len)
 {
-       wil_dbg_wmi(wil, "WMI: FW ready\n");
+       wil_dbg_wmi(wil, "WMI: got FW ready event\n");
 
        set_bit(wil_status_fwready, &wil->status);
        /* reuse wmi_ready for the firmware ready indication */
@@ -348,7 +348,7 @@ static void wmi_evt_scan_complete(struct wil6210_priv *wil, int id,
 {
        if (wil->scan_request) {
                struct wmi_scan_complete_event *data = d;
-               bool aborted = (data->status != 0);
+               bool aborted = (data->status != WMI_SCAN_SUCCESS);
 
                wil_dbg_wmi(wil, "SCAN_COMPLETE(0x%08x)\n", data->status);
                cfg80211_scan_done(wil->scan_request, aborted);
@@ -802,6 +802,7 @@ int wmi_pcp_start(struct wil6210_priv *wil, int bi, u8 wmi_nettype, u8 chan)
                .network_type = wmi_nettype,
                .disable_sec_offload = 1,
                .channel = chan - 1,
+               .pcp_max_assoc_sta = WIL6210_MAX_CID,
        };
        struct {
                struct wil6210_mbox_hdr_wmi wmi;
index 50b8528394f4050b8098098e9bbdd0520ae1bf2a..17334c852866c3cc8a061eecb829052ebd9415cf 100644 (file)
@@ -28,7 +28,7 @@
 #define __WILOCITY_WMI_H__
 
 /* General */
-
+#define WILOCITY_MAX_ASSOC_STA (8)
 #define WMI_MAC_LEN            (6)
 #define WMI_PROX_RANGE_NUM     (3)
 
@@ -219,15 +219,6 @@ struct wmi_disconnect_sta_cmd {
        __le16 disconnect_reason;
 } __packed;
 
-/*
- * WMI_RECONNECT_CMDID
- */
-struct wmi_reconnect_cmd {
-       u8 channel;                     /* hint */
-       u8 reserved;
-       u8 bssid[WMI_MAC_LEN];          /* mandatory if set */
-} __packed;
-
 
 /*
  * WMI_SET_PMK_CMDID
@@ -296,11 +287,13 @@ enum wmi_scan_type {
        WMI_LONG_SCAN           = 0,
        WMI_SHORT_SCAN          = 1,
        WMI_PBC_SCAN            = 2,
+       WMI_ACTIVE_SCAN         = 3,
+       WMI_DIRECT_SCAN         = 4,
 };
 
 struct wmi_start_scan_cmd {
-       u8 reserved[8];
-
+       u8 direct_scan_mac_addr[6];
+       u8 reserved[2];
        __le32 home_dwell_time; /* Max duration in the home channel(ms) */
        __le32 force_scan_interval;     /* Time interval between scans (ms)*/
        u8 scan_type;           /* wmi_scan_type */
@@ -332,6 +325,7 @@ struct wmi_probed_ssid_cmd {
        u8 ssid[WMI_MAX_SSID_LEN];
 } __packed;
 
+
 /*
  * WMI_SET_APPIE_CMDID
  * Add Application specified IE to a management frame
@@ -427,7 +421,7 @@ struct wmi_bcon_ctrl_cmd {
        __le16 frag_num;
        __le64 ss_mask;
        u8 network_type;
-       u8 reserved;
+       u8 pcp_max_assoc_sta;
        u8 disable_sec_offload;
        u8 disable_sec;
 } __packed;
@@ -450,7 +444,7 @@ enum wmi_port_role {
 struct wmi_port_allocate_cmd {
        u8 mac[WMI_MAC_LEN];
        u8 port_role;
-       u8 midid;
+       u8 mid;
 } __packed;
 
 /*
@@ -467,6 +461,7 @@ struct wmi_delete_port_cmd {
 enum wmi_discovery_mode {
        WMI_DISCOVERY_MODE_NON_OFFLOAD  = 0,
        WMI_DISCOVERY_MODE_OFFLOAD      = 1,
+       WMI_DISCOVERY_MODE_PEER2PEER    = 2,
 };
 
 struct wmi_p2p_cfg_cmd {
@@ -493,7 +488,8 @@ struct wmi_power_mgmt_cfg_cmd {
  */
 struct wmi_pcp_start_cmd {
        __le16 bcon_interval;
-       u8 reserved0[10];
+       u8 pcp_max_assoc_sta;
+       u8 reserved0[9];
        u8 network_type;
        u8 channel;
        u8 disable_sec_offload;
@@ -857,6 +853,7 @@ enum wmi_event_id {
        WMI_RF_MGMT_STATUS_EVENTID              = 0x1853,
        WMI_BF_SM_MGMT_DONE_EVENTID             = 0x1838,
        WMI_RX_MGMT_PACKET_EVENTID              = 0x1840,
+       WMI_TX_MGMT_PACKET_EVENTID              = 0x1841,
 
        /* Performance monitoring events */
        WMI_DATA_PORT_OPEN_EVENTID              = 0x1860,
@@ -1040,16 +1037,23 @@ enum wmi_disconnect_reason {
 struct wmi_disconnect_event {
        __le16 protocol_reason_status;  /* reason code, see 802.11 spec. */
        u8 bssid[WMI_MAC_LEN];          /* set if known */
-       u8 disconnect_reason;           /* see wmi_disconnect_reason_e */
-       u8 assoc_resp_len;
-       u8 assoc_info[0];
+       u8 disconnect_reason;           /* see wmi_disconnect_reason */
+       u8 assoc_resp_len;              /* not in use */
+       u8 assoc_info[0];               /* not in use */
 } __packed;
 
 /*
  * WMI_SCAN_COMPLETE_EVENTID
  */
+enum scan_status {
+       WMI_SCAN_SUCCESS        = 0,
+       WMI_SCAN_FAILED         = 1,
+       WMI_SCAN_ABORTED        = 2,
+       WMI_SCAN_REJECTED       = 3,
+};
+
 struct wmi_scan_complete_event {
-       __le32 status;
+       __le32 status;  /* scan_status */
 } __packed;
 
 /*
@@ -1256,6 +1260,14 @@ struct wmi_rx_mgmt_info {
        u8 channel;     /* From Radio MNGR */
 } __packed;
 
+
+/*
+ * WMI_TX_MGMT_PACKET_EVENTID
+ */
+struct wmi_tx_mgmt_packet_event {
+       u8 payload[0];
+} __packed;
+
 struct wmi_rx_mgmt_packet_event {
        struct wmi_rx_mgmt_info info;
        u8 payload[0];
index 54376fddfaf9f11fef6e4a7a0ae5554d5e427f4c..4113b69347640359aa22020eb0dcfd7efe0b7c14 100644 (file)
@@ -915,10 +915,6 @@ struct b43_wl {
        char rng_name[30 + 1];
 #endif /* CONFIG_B43_HWRNG */
 
-       /* List of all wireless devices on this chip */
-       struct list_head devlist;
-       u8 nr_devs;
-
        bool radiotap_enabled;
        bool radio_enabled;
 
index 69fc3d65531a7eeb88c4901f804ab197dbbb939e..32e08d35c06e131118e761a7df207a59b7e0043d 100644 (file)
@@ -1195,8 +1195,13 @@ static void b43_bcma_wireless_core_reset(struct b43_wldev *dev, bool gmode)
                  B43_BCMA_CLKCTLST_PHY_PLL_REQ;
        u32 status = B43_BCMA_CLKCTLST_80211_PLL_ST |
                     B43_BCMA_CLKCTLST_PHY_PLL_ST;
+       u32 flags;
+
+       flags = B43_BCMA_IOCTL_PHY_CLKEN;
+       if (gmode)
+               flags |= B43_BCMA_IOCTL_GMODE;
+       b43_device_enable(dev, flags);
 
-       b43_device_enable(dev, B43_BCMA_IOCTL_PHY_CLKEN);
        bcma_core_set_clockmode(dev->dev->bdev, BCMA_CLKMODE_FAST);
        b43_bcma_phy_reset(dev);
        bcma_core_pll_ctl(dev->dev->bdev, req, status, true);
@@ -3735,40 +3740,35 @@ static int b43_switch_band(struct b43_wl *wl, struct ieee80211_channel *chan)
 {
        struct b43_wldev *up_dev = NULL;
        struct b43_wldev *down_dev;
-       struct b43_wldev *d;
        int err;
        bool uninitialized_var(gmode);
        int prev_status;
 
        /* Find a device and PHY which supports the band. */
-       list_for_each_entry(d, &wl->devlist, list) {
-               switch (chan->band) {
-               case IEEE80211_BAND_5GHZ:
-                       if (d->phy.supports_5ghz) {
-                               up_dev = d;
-                               gmode = false;
-                       }
-                       break;
-               case IEEE80211_BAND_2GHZ:
-                       if (d->phy.supports_2ghz) {
-                               up_dev = d;
-                               gmode = true;
-                       }
-                       break;
-               default:
-                       B43_WARN_ON(1);
-                       return -EINVAL;
+       switch (chan->band) {
+       case IEEE80211_BAND_5GHZ:
+               if (wl->current_dev->phy.supports_5ghz) {
+                       up_dev = wl->current_dev;
+                       gmode = false;
                }
-               if (up_dev)
-                       break;
+               break;
+       case IEEE80211_BAND_2GHZ:
+               if (wl->current_dev->phy.supports_2ghz) {
+                       up_dev = wl->current_dev;
+                       gmode = true;
+               }
+               break;
+       default:
+               B43_WARN_ON(1);
+               return -EINVAL;
        }
+
        if (!up_dev) {
                b43err(wl, "Could not find a device for %s-GHz band operation\n",
                       band_to_string(chan->band));
                return -ENODEV;
        }
-       if ((up_dev == wl->current_dev) &&
-           (!!wl->current_dev->phy.gmode == !!gmode)) {
+       if (!!wl->current_dev->phy.gmode == !!gmode) {
                /* This device is already running. */
                return 0;
        }
@@ -5178,7 +5178,6 @@ static int b43_wireless_core_attach(struct b43_wldev *dev)
        }
 
        dev->phy.gmode = have_2ghz_phy;
-       dev->phy.radio_on = true;
        b43_wireless_core_reset(dev, dev->phy.gmode);
 
        err = b43_phy_versioning(dev);
@@ -5270,7 +5269,6 @@ static void b43_one_core_detach(struct b43_bus_dev *dev)
        b43_debugfs_remove_device(wldev);
        b43_wireless_core_detach(wldev);
        list_del(&wldev->list);
-       wl->nr_devs--;
        b43_bus_set_wldev(dev, NULL);
        kfree(wldev);
 }
@@ -5295,8 +5293,6 @@ static int b43_one_core_attach(struct b43_bus_dev *dev, struct b43_wl *wl)
        if (err)
                goto err_kfree_wldev;
 
-       list_add(&wldev->list, &wl->devlist);
-       wl->nr_devs++;
        b43_bus_set_wldev(dev, wldev);
        b43_debugfs_add_device(wldev);
 
@@ -5386,7 +5382,6 @@ static struct b43_wl *b43_wireless_init(struct b43_bus_dev *dev)
        wl->hw = hw;
        mutex_init(&wl->mutex);
        spin_lock_init(&wl->hardirq_lock);
-       INIT_LIST_HEAD(&wl->devlist);
        INIT_WORK(&wl->beacon_update_trigger, b43_beacon_update_trigger_work);
        INIT_WORK(&wl->txpower_adjust_work, b43_phy_txpower_adjust_work);
        INIT_WORK(&wl->tx_work, b43_tx_work);
@@ -5486,39 +5481,42 @@ int b43_ssb_probe(struct ssb_device *sdev, const struct ssb_device_id *id)
        struct b43_bus_dev *dev;
        struct b43_wl *wl;
        int err;
-       int first = 0;
 
        dev = b43_bus_dev_ssb_init(sdev);
        if (!dev)
                return -ENOMEM;
 
        wl = ssb_get_devtypedata(sdev);
-       if (!wl) {
-               /* Probing the first core. Must setup common struct b43_wl */
-               first = 1;
-               b43_sprom_fixup(sdev->bus);
-               wl = b43_wireless_init(dev);
-               if (IS_ERR(wl)) {
-                       err = PTR_ERR(wl);
-                       goto out;
-               }
-               ssb_set_devtypedata(sdev, wl);
-               B43_WARN_ON(ssb_get_devtypedata(sdev) != wl);
+       if (wl) {
+               b43err(NULL, "Dual-core devices are not supported\n");
+               err = -ENOTSUPP;
+               goto err_ssb_kfree_dev;
+       }
+
+       b43_sprom_fixup(sdev->bus);
+
+       wl = b43_wireless_init(dev);
+       if (IS_ERR(wl)) {
+               err = PTR_ERR(wl);
+               goto err_ssb_kfree_dev;
        }
+       ssb_set_devtypedata(sdev, wl);
+       B43_WARN_ON(ssb_get_devtypedata(sdev) != wl);
+
        err = b43_one_core_attach(dev, wl);
        if (err)
-               goto err_wireless_exit;
+               goto err_ssb_wireless_exit;
 
        /* setup and start work to load firmware */
        INIT_WORK(&wl->firmware_load, b43_request_firmware);
        schedule_work(&wl->firmware_load);
 
-      out:
        return err;
 
-      err_wireless_exit:
-       if (first)
-               b43_wireless_exit(dev, wl);
+err_ssb_wireless_exit:
+       b43_wireless_exit(dev, wl);
+err_ssb_kfree_dev:
+       kfree(dev);
        return err;
 }
 
@@ -5546,13 +5544,8 @@ static void b43_ssb_remove(struct ssb_device *sdev)
        /* Unregister HW RNG driver */
        b43_rng_exit(wl);
 
-       if (list_empty(&wl->devlist)) {
-               b43_leds_unregister(wl);
-               /* Last core on the chip unregistered.
-                * We can destroy common struct b43_wl.
-                */
-               b43_wireless_exit(dev, wl);
-       }
+       b43_leds_unregister(wl);
+       b43_wireless_exit(dev, wl);
 }
 
 static struct ssb_driver b43_ssb_driver = {
index dbaa51890198945b7552ed506bc3c8bfe4ba2359..3e45989f418d00876d00590094c6e9a1db1a4c56 100644 (file)
@@ -96,7 +96,7 @@ int b43_phy_init(struct b43_wldev *dev)
 
        phy->channel = ops->get_default_chan(dev);
 
-       ops->software_rfkill(dev, false);
+       b43_software_rfkill(dev, false);
        err = ops->init(dev);
        if (err) {
                b43err(dev->wl, "PHY init failed\n");
@@ -116,7 +116,7 @@ err_phy_exit:
        if (ops->exit)
                ops->exit(dev);
 err_block_rf:
-       ops->software_rfkill(dev, true);
+       b43_software_rfkill(dev, true);
 
        return err;
 }
@@ -125,7 +125,7 @@ void b43_phy_exit(struct b43_wldev *dev)
 {
        const struct b43_phy_operations *ops = dev->phy.ops;
 
-       ops->software_rfkill(dev, true);
+       b43_software_rfkill(dev, true);
        if (ops->exit)
                ops->exit(dev);
 }
index 12f467b8d564f1c5cdca06acbdf65ca8f80b48c8..8f5c14bc10e6fedcab2d7b88a0009470772dad8a 100644 (file)
@@ -1587,6 +1587,7 @@ static void b43_phy_initb5(struct b43_wldev *dev)
        b43_write16(dev, 0x03E4, (b43_read16(dev, 0x03E4) & 0xFFC0) | 0x0004);
 }
 
+/* http://bcm-v4.sipsolutions.net/802.11/PHY/Init/B6 */
 static void b43_phy_initb6(struct b43_wldev *dev)
 {
        struct b43_phy *phy = &dev->phy;
@@ -1670,7 +1671,7 @@ static void b43_phy_initb6(struct b43_wldev *dev)
                b43_radio_write16(dev, 0x50, 0x20);
        }
        if (phy->radio_rev <= 2) {
-               b43_radio_write16(dev, 0x7C, 0x20);
+               b43_radio_write16(dev, 0x50, 0x20);
                b43_radio_write16(dev, 0x5A, 0x70);
                b43_radio_write16(dev, 0x5B, 0x7B);
                b43_radio_write16(dev, 0x5C, 0xB0);
@@ -1686,9 +1687,8 @@ static void b43_phy_initb6(struct b43_wldev *dev)
                b43_phy_write(dev, 0x2A, 0x8AC0);
        b43_phy_write(dev, 0x0038, 0x0668);
        b43_set_txpower_g(dev, &gphy->bbatt, &gphy->rfatt, gphy->tx_control);
-       if (phy->radio_rev <= 5) {
+       if (phy->radio_rev == 4 || phy->radio_rev == 5)
                b43_phy_maskset(dev, 0x5D, 0xFF80, 0x0003);
-       }
        if (phy->radio_rev <= 2)
                b43_radio_write16(dev, 0x005D, 0x000D);
 
index 24ccbe96e0c8a0723bb165b0f32677ab6ba7ded5..41dab89a2942dddbc80388079e5812382f3f6a43 100644 (file)
@@ -257,6 +257,72 @@ static void b43_nphy_rf_ctl_override(struct b43_wldev *dev, u16 field,
        }
 }
 
+static void b43_nphy_rf_ctl_intc_override_rev7(struct b43_wldev *dev,
+                                              enum n_intc_override intc_override,
+                                              u16 value, u8 core_sel)
+{
+       u16 reg, tmp, tmp2, val;
+       int core;
+
+       for (core = 0; core < 2; core++) {
+               if ((core_sel == 1 && core != 0) ||
+                   (core_sel == 2 && core != 1))
+                       continue;
+
+               reg = (core == 0) ? B43_NPHY_RFCTL_INTC1 : B43_NPHY_RFCTL_INTC2;
+
+               switch (intc_override) {
+               case N_INTC_OVERRIDE_OFF:
+                       b43_phy_write(dev, reg, 0);
+                       b43_nphy_force_rf_sequence(dev, B43_RFSEQ_RESET2RX);
+                       break;
+               case N_INTC_OVERRIDE_TRSW:
+                       b43_phy_maskset(dev, reg, ~0xC0, value << 6);
+                       b43_phy_set(dev, reg, 0x400);
+
+                       b43_phy_mask(dev, 0x2ff, ~0xC000 & 0xFFFF);
+                       b43_phy_set(dev, 0x2ff, 0x2000);
+                       b43_phy_set(dev, 0x2ff, 0x0001);
+                       break;
+               case N_INTC_OVERRIDE_PA:
+                       tmp = 0x0030;
+                       if (b43_current_band(dev->wl) == IEEE80211_BAND_5GHZ)
+                               val = value << 5;
+                       else
+                               val = value << 4;
+                       b43_phy_maskset(dev, reg, ~tmp, val);
+                       b43_phy_set(dev, reg, 0x1000);
+                       break;
+               case N_INTC_OVERRIDE_EXT_LNA_PU:
+                       if (b43_current_band(dev->wl) == IEEE80211_BAND_5GHZ) {
+                               tmp = 0x0001;
+                               tmp2 = 0x0004;
+                               val = value;
+                       } else {
+                               tmp = 0x0004;
+                               tmp2 = 0x0001;
+                               val = value << 2;
+                       }
+                       b43_phy_maskset(dev, reg, ~tmp, val);
+                       b43_phy_mask(dev, reg, ~tmp2);
+                       break;
+               case N_INTC_OVERRIDE_EXT_LNA_GAIN:
+                       if (b43_current_band(dev->wl) == IEEE80211_BAND_5GHZ) {
+                               tmp = 0x0002;
+                               tmp2 = 0x0008;
+                               val = value << 1;
+                       } else {
+                               tmp = 0x0008;
+                               tmp2 = 0x0002;
+                               val = value << 3;
+                       }
+                       b43_phy_maskset(dev, reg, ~tmp, val);
+                       b43_phy_mask(dev, reg, ~tmp2);
+                       break;
+               }
+       }
+}
+
 /* http://bcm-v4.sipsolutions.net/802.11/PHY/N/RFCtrlIntcOverride */
 static void b43_nphy_rf_ctl_intc_override(struct b43_wldev *dev,
                                          enum n_intc_override intc_override,
@@ -265,6 +331,12 @@ static void b43_nphy_rf_ctl_intc_override(struct b43_wldev *dev,
        u8 i, j;
        u16 reg, tmp, val;
 
+       if (dev->phy.rev >= 7) {
+               b43_nphy_rf_ctl_intc_override_rev7(dev, intc_override, value,
+                                                  core);
+               return;
+       }
+
        B43_WARN_ON(dev->phy.rev < 3);
 
        for (i = 0; i < 2; i++) {
@@ -419,7 +491,8 @@ static void b43_nphy_stay_in_carrier_search(struct b43_wldev *dev, bool enable)
                static const u16 clip[] = { 0xFFFF, 0xFFFF };
                if (nphy->deaf_count++ == 0) {
                        nphy->classifier_state = b43_nphy_classifier(dev, 0, 0);
-                       b43_nphy_classifier(dev, 0x7, 0);
+                       b43_nphy_classifier(dev, 0x7,
+                                           B43_NPHY_CLASSCTL_WAITEDEN);
                        b43_nphy_read_clip_detection(dev, nphy->clip_state);
                        b43_nphy_write_clip_detection(dev, clip);
                }
@@ -734,9 +807,16 @@ static void b43_radio_2056_setup(struct b43_wldev *dev,
        u16 bias, cbias;
        u16 pag_boost, padg_boost, pgag_boost, mixg_boost;
        u16 paa_boost, pada_boost, pgaa_boost, mixa_boost;
+       bool is_pkg_fab_smic;
 
        B43_WARN_ON(dev->phy.rev < 3);
 
+       is_pkg_fab_smic =
+               ((dev->dev->chip_id == BCMA_CHIP_ID_BCM43224 ||
+                 dev->dev->chip_id == BCMA_CHIP_ID_BCM43225 ||
+                 dev->dev->chip_id == BCMA_CHIP_ID_BCM43421) &&
+                dev->dev->chip_pkg == BCMA_PKG_ID_BCM43224_FAB_SMIC);
+
        b43_chantab_radio_2056_upload(dev, e);
        b2056_upload_syn_pll_cp2(dev, band == IEEE80211_BAND_5GHZ);
 
@@ -744,7 +824,8 @@ static void b43_radio_2056_setup(struct b43_wldev *dev,
            b43_current_band(dev->wl) == IEEE80211_BAND_2GHZ) {
                b43_radio_write(dev, B2056_SYN_PLL_LOOPFILTER1, 0x1F);
                b43_radio_write(dev, B2056_SYN_PLL_LOOPFILTER2, 0x1F);
-               if (dev->dev->chip_id == 0x4716) {
+               if (dev->dev->chip_id == BCMA_CHIP_ID_BCM4716 ||
+                   dev->dev->chip_id == BCMA_CHIP_ID_BCM47162) {
                        b43_radio_write(dev, B2056_SYN_PLL_LOOPFILTER4, 0x14);
                        b43_radio_write(dev, B2056_SYN_PLL_CP2, 0);
                } else {
@@ -752,6 +833,13 @@ static void b43_radio_2056_setup(struct b43_wldev *dev,
                        b43_radio_write(dev, B2056_SYN_PLL_CP2, 0x14);
                }
        }
+       if (sprom->boardflags2_hi & B43_BFH2_GPLL_WAR2 &&
+           b43_current_band(dev->wl) == IEEE80211_BAND_2GHZ) {
+               b43_radio_write(dev, B2056_SYN_PLL_LOOPFILTER1, 0x1f);
+               b43_radio_write(dev, B2056_SYN_PLL_LOOPFILTER2, 0x1f);
+               b43_radio_write(dev, B2056_SYN_PLL_LOOPFILTER4, 0x0b);
+               b43_radio_write(dev, B2056_SYN_PLL_CP2, 0x20);
+       }
        if (sprom->boardflags2_lo & B43_BFL2_APLL_WAR &&
            b43_current_band(dev->wl) == IEEE80211_BAND_5GHZ) {
                b43_radio_write(dev, B2056_SYN_PLL_LOOPFILTER1, 0x1F);
@@ -767,7 +855,8 @@ static void b43_radio_2056_setup(struct b43_wldev *dev,
                                b43_radio_write(dev,
                                        offset | B2056_TX_PADG_IDAC, 0xcc);
 
-                               if (dev->dev->chip_id == 0x4716) {
+                               if (dev->dev->chip_id == BCMA_CHIP_ID_BCM4716 ||
+                                   dev->dev->chip_id == BCMA_CHIP_ID_BCM47162) {
                                        bias = 0x40;
                                        cbias = 0x45;
                                        pag_boost = 0x5;
@@ -776,6 +865,10 @@ static void b43_radio_2056_setup(struct b43_wldev *dev,
                                } else {
                                        bias = 0x25;
                                        cbias = 0x20;
+                                       if (is_pkg_fab_smic) {
+                                               bias = 0x2a;
+                                               cbias = 0x38;
+                                       }
                                        pag_boost = 0x4;
                                        pgag_boost = 0x03;
                                        mixg_boost = 0x65;
@@ -844,6 +937,8 @@ static void b43_radio_2056_setup(struct b43_wldev *dev,
                        mixa_boost = 0xF;
                }
 
+               cbias = is_pkg_fab_smic ? 0x35 : 0x30;
+
                for (i = 0; i < 2; i++) {
                        offset = i ? B2056_TX1 : B2056_TX0;
 
@@ -862,11 +957,11 @@ static void b43_radio_2056_setup(struct b43_wldev *dev,
                        b43_radio_write(dev,
                                offset | B2056_TX_PADA_CASCBIAS, 0x03);
                        b43_radio_write(dev,
-                               offset | B2056_TX_INTPAA_IAUX_STAT, 0x50);
+                               offset | B2056_TX_INTPAA_IAUX_STAT, 0x30);
                        b43_radio_write(dev,
-                               offset | B2056_TX_INTPAA_IMAIN_STAT, 0x50);
+                               offset | B2056_TX_INTPAA_IMAIN_STAT, 0x30);
                        b43_radio_write(dev,
-                               offset | B2056_TX_INTPAA_CASCBIAS, 0x30);
+                               offset | B2056_TX_INTPAA_CASCBIAS, cbias);
                }
        }
 
@@ -1164,23 +1259,20 @@ static void b43_nphy_run_samples(struct b43_wldev *dev, u16 samps, u16 loops,
        u16 seq_mode;
        u32 tmp;
 
-       if (nphy->hang_avoid)
-               b43_nphy_stay_in_carrier_search(dev, true);
+       b43_nphy_stay_in_carrier_search(dev, true);
 
        if ((nphy->bb_mult_save & 0x80000000) == 0) {
                tmp = b43_ntab_read(dev, B43_NTAB16(15, 87));
                nphy->bb_mult_save = (tmp & 0xFFFF) | 0x80000000;
        }
 
+       /* TODO: add modify_bbmult argument */
        if (!dev->phy.is_40mhz)
                tmp = 0x6464;
        else
                tmp = 0x4747;
        b43_ntab_write(dev, B43_NTAB16(15, 87), tmp);
 
-       if (nphy->hang_avoid)
-               b43_nphy_stay_in_carrier_search(dev, false);
-
        b43_phy_write(dev, B43_NPHY_SAMP_DEPCNT, (samps - 1));
 
        if (loops != 0xFFFF)
@@ -1213,6 +1305,8 @@ static void b43_nphy_run_samples(struct b43_wldev *dev, u16 samps, u16 loops,
                b43err(dev->wl, "run samples timeout\n");
 
        b43_phy_write(dev, B43_NPHY_RFSEQMODE, seq_mode);
+
+       b43_nphy_stay_in_carrier_search(dev, false);
 }
 
 /**************************************************
@@ -1588,8 +1682,8 @@ static void b43_nphy_rev3_rssi_cal(struct b43_wldev *dev)
        struct b43_phy_n *nphy = dev->phy.n;
 
        u16 saved_regs_phy_rfctl[2];
-       u16 saved_regs_phy[13];
-       u16 regs_to_store[] = {
+       u16 saved_regs_phy[22];
+       u16 regs_to_store_rev3[] = {
                B43_NPHY_AFECTL_OVER1, B43_NPHY_AFECTL_OVER,
                B43_NPHY_AFECTL_C1, B43_NPHY_AFECTL_C2,
                B43_NPHY_TXF_40CO_B1S1, B43_NPHY_RFCTL_OVER,
@@ -1598,6 +1692,20 @@ static void b43_nphy_rev3_rssi_cal(struct b43_wldev *dev)
                B43_NPHY_RFCTL_LUT_TRSW_UP1, B43_NPHY_RFCTL_LUT_TRSW_UP2,
                B43_NPHY_RFCTL_RSSIO1, B43_NPHY_RFCTL_RSSIO2
        };
+       u16 regs_to_store_rev7[] = {
+               B43_NPHY_AFECTL_OVER1, B43_NPHY_AFECTL_OVER,
+               B43_NPHY_AFECTL_C1, B43_NPHY_AFECTL_C2,
+               B43_NPHY_TXF_40CO_B1S1, B43_NPHY_RFCTL_OVER,
+               0x342, 0x343, 0x346, 0x347,
+               0x2ff,
+               B43_NPHY_TXF_40CO_B1S0, B43_NPHY_TXF_40CO_B32S1,
+               B43_NPHY_RFCTL_CMD,
+               B43_NPHY_RFCTL_LUT_TRSW_UP1, B43_NPHY_RFCTL_LUT_TRSW_UP2,
+               0x340, 0x341, 0x344, 0x345,
+               B43_NPHY_RFCTL_RSSIO1, B43_NPHY_RFCTL_RSSIO2
+       };
+       u16 *regs_to_store;
+       int regs_amount;
 
        u16 class;
 
@@ -1617,6 +1725,15 @@ static void b43_nphy_rev3_rssi_cal(struct b43_wldev *dev)
        u8 rx_core_state;
        int core, i, j, vcm;
 
+       if (dev->phy.rev >= 7) {
+               regs_to_store = regs_to_store_rev7;
+               regs_amount = ARRAY_SIZE(regs_to_store_rev7);
+       } else {
+               regs_to_store = regs_to_store_rev3;
+               regs_amount = ARRAY_SIZE(regs_to_store_rev3);
+       }
+       BUG_ON(regs_amount > ARRAY_SIZE(saved_regs_phy));
+
        class = b43_nphy_classifier(dev, 0, 0);
        b43_nphy_classifier(dev, 7, 4);
        b43_nphy_read_clip_detection(dev, clip_state);
@@ -1624,22 +1741,29 @@ static void b43_nphy_rev3_rssi_cal(struct b43_wldev *dev)
 
        saved_regs_phy_rfctl[0] = b43_phy_read(dev, B43_NPHY_RFCTL_INTC1);
        saved_regs_phy_rfctl[1] = b43_phy_read(dev, B43_NPHY_RFCTL_INTC2);
-       for (i = 0; i < ARRAY_SIZE(regs_to_store); i++)
+       for (i = 0; i < regs_amount; i++)
                saved_regs_phy[i] = b43_phy_read(dev, regs_to_store[i]);
 
        b43_nphy_rf_ctl_intc_override(dev, N_INTC_OVERRIDE_OFF, 0, 7);
        b43_nphy_rf_ctl_intc_override(dev, N_INTC_OVERRIDE_TRSW, 1, 7);
-       b43_nphy_rf_ctl_override(dev, 0x1, 0, 0, false);
-       b43_nphy_rf_ctl_override(dev, 0x2, 1, 0, false);
-       b43_nphy_rf_ctl_override(dev, 0x80, 1, 0, false);
-       b43_nphy_rf_ctl_override(dev, 0x40, 1, 0, false);
-
-       if (b43_current_band(dev->wl) == IEEE80211_BAND_5GHZ) {
-               b43_nphy_rf_ctl_override(dev, 0x20, 0, 0, false);
-               b43_nphy_rf_ctl_override(dev, 0x10, 1, 0, false);
+
+       if (dev->phy.rev >= 7) {
+               /* TODO */
+               if (b43_current_band(dev->wl) == IEEE80211_BAND_5GHZ) {
+               } else {
+               }
        } else {
-               b43_nphy_rf_ctl_override(dev, 0x10, 0, 0, false);
-               b43_nphy_rf_ctl_override(dev, 0x20, 1, 0, false);
+               b43_nphy_rf_ctl_override(dev, 0x1, 0, 0, false);
+               b43_nphy_rf_ctl_override(dev, 0x2, 1, 0, false);
+               b43_nphy_rf_ctl_override(dev, 0x80, 1, 0, false);
+               b43_nphy_rf_ctl_override(dev, 0x40, 1, 0, false);
+               if (b43_current_band(dev->wl) == IEEE80211_BAND_5GHZ) {
+                       b43_nphy_rf_ctl_override(dev, 0x20, 0, 0, false);
+                       b43_nphy_rf_ctl_override(dev, 0x10, 1, 0, false);
+               } else {
+                       b43_nphy_rf_ctl_override(dev, 0x10, 0, 0, false);
+                       b43_nphy_rf_ctl_override(dev, 0x20, 1, 0, false);
+               }
        }
 
        rx_core_state = b43_nphy_get_rx_core_state(dev);
@@ -1654,8 +1778,11 @@ static void b43_nphy_rev3_rssi_cal(struct b43_wldev *dev)
 
                /* Grab RSSI results for every possible VCM */
                for (vcm = 0; vcm < 8; vcm++) {
-                       b43_radio_maskset(dev, r | B2056_RX_RSSI_MISC, 0xE3,
-                                       vcm << 2);
+                       if (dev->phy.rev >= 7)
+                               ;
+                       else
+                               b43_radio_maskset(dev, r | B2056_RX_RSSI_MISC,
+                                                 0xE3, vcm << 2);
                        b43_nphy_poll_rssi(dev, N_RSSI_NB, results[vcm], 8);
                }
 
@@ -1682,8 +1809,11 @@ static void b43_nphy_rev3_rssi_cal(struct b43_wldev *dev)
                }
 
                /* Select the best VCM */
-               b43_radio_maskset(dev, r | B2056_RX_RSSI_MISC, 0xE3,
-                                 vcm_final << 2);
+               if (dev->phy.rev >= 7)
+                       ;
+               else
+                       b43_radio_maskset(dev, r | B2056_RX_RSSI_MISC,
+                                         0xE3, vcm_final << 2);
 
                for (i = 0; i < 4; i++) {
                        if (core != i / 2)
@@ -1736,9 +1866,9 @@ static void b43_nphy_rev3_rssi_cal(struct b43_wldev *dev)
 
        b43_phy_set(dev, B43_NPHY_RFCTL_OVER, 0x1);
        b43_phy_set(dev, B43_NPHY_RFCTL_CMD, B43_NPHY_RFCTL_CMD_RXTX);
-       b43_phy_mask(dev, B43_NPHY_TXF_40CO_B1S1, ~0x1);
+       b43_phy_mask(dev, B43_NPHY_RFCTL_OVER, ~0x1);
 
-       for (i = 0; i < ARRAY_SIZE(regs_to_store); i++)
+       for (i = 0; i < regs_amount; i++)
                b43_phy_write(dev, regs_to_store[i], saved_regs_phy[i]);
 
        /* Store for future configuration */
@@ -2494,8 +2624,8 @@ static void b43_nphy_workarounds_rev3plus(struct b43_wldev *dev)
        struct ssb_sprom *sprom = dev->dev->bus_sprom;
 
        /* TX to RX */
-       u8 tx2rx_events[8] = { 0x4, 0x3, 0x6, 0x5, 0x2, 0x1, 0x8, 0x1F };
-       u8 tx2rx_delays[8] = { 8, 4, 2, 2, 4, 4, 6, 1 };
+       u8 tx2rx_events[7] = { 0x4, 0x3, 0x5, 0x2, 0x1, 0x8, 0x1F };
+       u8 tx2rx_delays[7] = { 8, 4, 4, 4, 4, 6, 1 };
        /* RX to TX */
        u8 rx2tx_events_ipa[9] = { 0x0, 0x1, 0x2, 0x8, 0x5, 0x6, 0xF, 0x3,
                                        0x1F };
@@ -2503,6 +2633,23 @@ static void b43_nphy_workarounds_rev3plus(struct b43_wldev *dev)
        u8 rx2tx_events[9] = { 0x0, 0x1, 0x2, 0x8, 0x5, 0x6, 0x3, 0x4, 0x1F };
        u8 rx2tx_delays[9] = { 8, 6, 6, 4, 4, 18, 42, 1, 1 };
 
+       u16 vmids[5][4] = {
+               { 0xa2, 0xb4, 0xb4, 0x89, }, /* 0 */
+               { 0xb4, 0xb4, 0xb4, 0x24, }, /* 1 */
+               { 0xa2, 0xb4, 0xb4, 0x74, }, /* 2 */
+               { 0xa2, 0xb4, 0xb4, 0x270, }, /* 3 */
+               { 0xa2, 0xb4, 0xb4, 0x00, }, /* 4 and 5 */
+       };
+       u16 gains[5][4] = {
+               { 0x02, 0x02, 0x02, 0x00, }, /* 0 */
+               { 0x02, 0x02, 0x02, 0x02, }, /* 1 */
+               { 0x02, 0x02, 0x02, 0x04, }, /* 2 */
+               { 0x02, 0x02, 0x02, 0x00, }, /* 3 */
+               { 0x02, 0x02, 0x02, 0x00, }, /* 4 and 5 */
+       };
+       u16 *vmid, *gain;
+
+       u8 pdet_range;
        u16 tmp16;
        u32 tmp32;
 
@@ -2561,7 +2708,71 @@ static void b43_nphy_workarounds_rev3plus(struct b43_wldev *dev)
        b43_ntab_write(dev, B43_NTAB16(8, 0), 2);
        b43_ntab_write(dev, B43_NTAB16(8, 16), 2);
 
-       /* TODO */
+       if (b43_current_band(dev->wl) == IEEE80211_BAND_2GHZ)
+               pdet_range = sprom->fem.ghz2.pdet_range;
+       else
+               pdet_range = sprom->fem.ghz5.pdet_range;
+       vmid = vmids[min_t(u16, pdet_range, 4)];
+       gain = gains[min_t(u16, pdet_range, 4)];
+       switch (pdet_range) {
+       case 3:
+               if (!(dev->phy.rev >= 4 &&
+                     b43_current_band(dev->wl) == IEEE80211_BAND_2GHZ))
+                       break;
+               /* FALL THROUGH */
+       case 0:
+       case 1:
+               b43_ntab_write_bulk(dev, B43_NTAB16(8, 0x08), 4, vmid);
+               b43_ntab_write_bulk(dev, B43_NTAB16(8, 0x18), 4, vmid);
+               b43_ntab_write_bulk(dev, B43_NTAB16(8, 0x0c), 4, gain);
+               b43_ntab_write_bulk(dev, B43_NTAB16(8, 0x1c), 4, gain);
+               break;
+       case 2:
+               if (dev->phy.rev >= 6) {
+                       if (b43_current_band(dev->wl) == IEEE80211_BAND_2GHZ)
+                               vmid[3] = 0x94;
+                       else
+                               vmid[3] = 0x8e;
+                       gain[3] = 3;
+               } else if (dev->phy.rev == 5) {
+                       vmid[3] = 0x84;
+                       gain[3] = 2;
+               }
+               b43_ntab_write_bulk(dev, B43_NTAB16(8, 0x08), 4, vmid);
+               b43_ntab_write_bulk(dev, B43_NTAB16(8, 0x18), 4, vmid);
+               b43_ntab_write_bulk(dev, B43_NTAB16(8, 0x0c), 4, gain);
+               b43_ntab_write_bulk(dev, B43_NTAB16(8, 0x1c), 4, gain);
+               break;
+       case 4:
+       case 5:
+               if (b43_current_band(dev->wl) != IEEE80211_BAND_2GHZ) {
+                       if (pdet_range == 4) {
+                               vmid[3] = 0x8e;
+                               tmp16 = 0x96;
+                               gain[3] = 0x2;
+                       } else {
+                               vmid[3] = 0x89;
+                               tmp16 = 0x89;
+                               gain[3] = 0;
+                       }
+               } else {
+                       if (pdet_range == 4) {
+                               vmid[3] = 0x89;
+                               tmp16 = 0x8b;
+                               gain[3] = 0x2;
+                       } else {
+                               vmid[3] = 0x74;
+                               tmp16 = 0x70;
+                               gain[3] = 0;
+                       }
+               }
+               b43_ntab_write_bulk(dev, B43_NTAB16(8, 0x08), 4, vmid);
+               b43_ntab_write_bulk(dev, B43_NTAB16(8, 0x0c), 4, gain);
+               vmid[3] = tmp16;
+               b43_ntab_write_bulk(dev, B43_NTAB16(8, 0x18), 4, vmid);
+               b43_ntab_write_bulk(dev, B43_NTAB16(8, 0x1c), 4, gain);
+               break;
+       }
 
        b43_radio_write(dev, B2056_RX0 | B2056_RX_MIXA_MAST_BIAS, 0x00);
        b43_radio_write(dev, B2056_RX1 | B2056_RX_MIXA_MAST_BIAS, 0x00);
@@ -2600,7 +2811,7 @@ static void b43_nphy_workarounds_rev3plus(struct b43_wldev *dev)
        /* Dropped probably-always-true condition */
        b43_phy_write(dev, B43_NPHY_ED_CRS40ASSERTTHRESH0, 0x03eb);
        b43_phy_write(dev, B43_NPHY_ED_CRS40ASSERTTHRESH1, 0x03eb);
-       b43_phy_write(dev, B43_NPHY_ED_CRS40DEASSERTTHRESH1, 0x0341);
+       b43_phy_write(dev, B43_NPHY_ED_CRS40DEASSERTTHRESH0, 0x0341);
        b43_phy_write(dev, B43_NPHY_ED_CRS40DEASSERTTHRESH1, 0x0341);
        b43_phy_write(dev, B43_NPHY_ED_CRS20LASSERTTHRESH0, 0x042b);
        b43_phy_write(dev, B43_NPHY_ED_CRS20LASSERTTHRESH1, 0x042b);
@@ -3211,6 +3422,20 @@ static void b43_nphy_tx_prepare_adjusted_power_table(struct b43_wldev *dev)
        u8 idx, delta;
        u8 i, stf_mode;
 
+       /* Array adj_pwr_tbl corresponds to the hardware table. It consists of
+        * 21 groups, each containing 4 entries.
+        *
+        * First group has entries for CCK modulation.
+        * The rest of groups has 1 entry per modulation (SISO, CDD, STBC, SDM).
+        *
+        * Group 0 is for CCK
+        * Groups 1..4 use BPSK (group per coding rate)
+        * Groups 5..8 use QPSK (group per coding rate)
+        * Groups 9..12 use 16-QAM (group per coding rate)
+        * Groups 13..16 use 64-QAM (group per coding rate)
+        * Groups 17..20 are unknown
+        */
+
        for (i = 0; i < 4; i++)
                nphy->adj_pwr_tbl[i] = nphy->tx_power_offset[i];
 
@@ -3409,10 +3634,8 @@ static void b43_nphy_tx_power_ctl_setup(struct b43_wldev *dev)
        }
 
        b43_nphy_tx_prepare_adjusted_power_table(dev);
-       /*
        b43_ntab_write_bulk(dev, B43_NTAB16(26, 64), 84, nphy->adj_pwr_tbl);
        b43_ntab_write_bulk(dev, B43_NTAB16(27, 64), 84, nphy->adj_pwr_tbl);
-       */
 
        if (nphy->hang_avoid)
                b43_nphy_stay_in_carrier_search(dev, false);
@@ -5124,7 +5347,7 @@ static int b43_phy_initn(struct b43_wldev *dev)
        b43_phy_write(dev, B43_NPHY_TXMACIF_HOLDOFF, 0x0015);
        b43_phy_write(dev, B43_NPHY_TXMACDELAY, 0x0320);
        if (phy->rev >= 3 && phy->rev <= 6)
-               b43_phy_write(dev, B43_NPHY_PLOAD_CSENSE_EXTLEN, 0x0014);
+               b43_phy_write(dev, B43_NPHY_PLOAD_CSENSE_EXTLEN, 0x0032);
        b43_nphy_tx_lp_fbw(dev);
        if (phy->rev >= 3)
                b43_nphy_spur_workaround(dev);
@@ -5441,8 +5664,11 @@ static u16 b43_nphy_op_radio_read(struct b43_wldev *dev, u16 reg)
 {
        /* Register 1 is a 32-bit register. */
        B43_WARN_ON(reg == 1);
-       /* N-PHY needs 0x100 for read access */
-       reg |= 0x100;
+
+       if (dev->phy.rev >= 7)
+               reg |= 0x200; /* Radio 0x2057 */
+       else
+               reg |= 0x100;
 
        b43_write16(dev, B43_MMIO_RADIO_CONTROL, reg);
        return b43_read16(dev, B43_MMIO_RADIO_DATA_LOW);
index b4fd9345d673a3542d94e66260ebc685eed7ef8d..a07e4cacab7727619dedb2f1e7b642e39d23fb5f 100644 (file)
@@ -48,7 +48,7 @@ struct b2056_inittabs_pts {
        unsigned int rx_length;
 };
 
-static const struct b2056_inittab_entry b2056_inittab_rev3_syn[] = {
+static const struct b2056_inittab_entry b2056_inittab_phy_rev3_syn[] = {
        [B2056_SYN_RESERVED_ADDR2]      = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
        [B2056_SYN_RESERVED_ADDR3]      = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
        [B2056_SYN_RESERVED_ADDR4]      = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
@@ -232,7 +232,7 @@ static const struct b2056_inittab_entry b2056_inittab_rev3_syn[] = {
        [B2056_SYN_LOGEN_TX_CMOS_VALID] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
 };
 
-static const struct b2056_inittab_entry b2056_inittab_rev3_tx[] = {
+static const struct b2056_inittab_entry b2056_inittab_phy_rev3_tx[] = {
        [B2056_TX_RESERVED_ADDR2]       = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
        [B2056_TX_RESERVED_ADDR3]       = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
        [B2056_TX_RESERVED_ADDR4]       = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
@@ -380,7 +380,7 @@ static const struct b2056_inittab_entry b2056_inittab_rev3_tx[] = {
        [B2056_TX_STATUS_TXLPF_RC]      = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
 };
 
-static const struct b2056_inittab_entry b2056_inittab_rev3_rx[] = {
+static const struct b2056_inittab_entry b2056_inittab_phy_rev3_rx[] = {
        [B2056_RX_RESERVED_ADDR2]       = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
        [B2056_RX_RESERVED_ADDR3]       = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
        [B2056_RX_RESERVED_ADDR4]       = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
@@ -530,7 +530,7 @@ static const struct b2056_inittab_entry b2056_inittab_rev3_rx[] = {
        [B2056_RX_STATUS_HPC_RC]        = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
 };
 
-static const struct b2056_inittab_entry b2056_inittab_rev4_syn[] = {
+static const struct b2056_inittab_entry b2056_inittab_phy_rev4_syn[] = {
        [B2056_SYN_RESERVED_ADDR2]      = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
        [B2056_SYN_RESERVED_ADDR3]      = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
        [B2056_SYN_RESERVED_ADDR4]      = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
@@ -714,7 +714,7 @@ static const struct b2056_inittab_entry b2056_inittab_rev4_syn[] = {
        [B2056_SYN_LOGEN_TX_CMOS_VALID] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
 };
 
-static const struct b2056_inittab_entry b2056_inittab_rev4_tx[] = {
+static const struct b2056_inittab_entry b2056_inittab_phy_rev4_tx[] = {
        [B2056_TX_RESERVED_ADDR2]       = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
        [B2056_TX_RESERVED_ADDR3]       = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
        [B2056_TX_RESERVED_ADDR4]       = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
@@ -862,7 +862,7 @@ static const struct b2056_inittab_entry b2056_inittab_rev4_tx[] = {
        [B2056_TX_STATUS_TXLPF_RC]      = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
 };
 
-static const struct b2056_inittab_entry b2056_inittab_rev4_rx[] = {
+static const struct b2056_inittab_entry b2056_inittab_phy_rev4_rx[] = {
        [B2056_RX_RESERVED_ADDR2]       = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
        [B2056_RX_RESERVED_ADDR3]       = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
        [B2056_RX_RESERVED_ADDR4]       = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
@@ -1012,7 +1012,7 @@ static const struct b2056_inittab_entry b2056_inittab_rev4_rx[] = {
        [B2056_RX_STATUS_HPC_RC]        = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
 };
 
-static const struct b2056_inittab_entry b2056_inittab_rev5_syn[] = {
+static const struct b2056_inittab_entry b2056_inittab_radio_rev5_syn[] = {
        [B2056_SYN_RESERVED_ADDR2]      = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
        [B2056_SYN_RESERVED_ADDR3]      = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
        [B2056_SYN_RESERVED_ADDR4]      = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
@@ -1196,7 +1196,7 @@ static const struct b2056_inittab_entry b2056_inittab_rev5_syn[] = {
        [B2056_SYN_LOGEN_TX_CMOS_VALID] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
 };
 
-static const struct b2056_inittab_entry b2056_inittab_rev5_tx[] = {
+static const struct b2056_inittab_entry b2056_inittab_radio_rev5_tx[] = {
        [B2056_TX_RESERVED_ADDR2]       = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
        [B2056_TX_RESERVED_ADDR3]       = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
        [B2056_TX_RESERVED_ADDR4]       = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
@@ -1352,7 +1352,7 @@ static const struct b2056_inittab_entry b2056_inittab_rev5_tx[] = {
        [B2056_TX_GMBB_IDAC7]           = { .ghz5 = 0x0075, .ghz2 = 0x0075, UPLOAD, },
 };
 
-static const struct b2056_inittab_entry b2056_inittab_rev5_rx[] = {
+static const struct b2056_inittab_entry b2056_inittab_radio_rev5_rx[] = {
        [B2056_RX_RESERVED_ADDR2]       = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
        [B2056_RX_RESERVED_ADDR3]       = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
        [B2056_RX_RESERVED_ADDR4]       = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
@@ -1502,7 +1502,7 @@ static const struct b2056_inittab_entry b2056_inittab_rev5_rx[] = {
        [B2056_RX_STATUS_HPC_RC]        = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
 };
 
-static const struct b2056_inittab_entry b2056_inittab_rev6_syn[] = {
+static const struct b2056_inittab_entry b2056_inittab_radio_rev6_syn[] = {
        [B2056_SYN_RESERVED_ADDR2]      = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
        [B2056_SYN_RESERVED_ADDR3]      = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
        [B2056_SYN_RESERVED_ADDR4]      = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
@@ -1686,7 +1686,7 @@ static const struct b2056_inittab_entry b2056_inittab_rev6_syn[] = {
        [B2056_SYN_LOGEN_TX_CMOS_VALID] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
 };
 
-static const struct b2056_inittab_entry b2056_inittab_rev6_tx[] = {
+static const struct b2056_inittab_entry b2056_inittab_radio_rev6_tx[] = {
        [B2056_TX_RESERVED_ADDR2]       = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
        [B2056_TX_RESERVED_ADDR3]       = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
        [B2056_TX_RESERVED_ADDR4]       = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
@@ -1842,7 +1842,7 @@ static const struct b2056_inittab_entry b2056_inittab_rev6_tx[] = {
        [B2056_TX_GMBB_IDAC7]           = { .ghz5 = 0x0070, .ghz2 = 0x0070, NOUPLOAD, },
 };
 
-static const struct b2056_inittab_entry b2056_inittab_rev6_rx[] = {
+static const struct b2056_inittab_entry b2056_inittab_radio_rev6_rx[] = {
        [B2056_RX_RESERVED_ADDR2]       = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
        [B2056_RX_RESERVED_ADDR3]       = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
        [B2056_RX_RESERVED_ADDR4]       = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
@@ -1992,7 +1992,7 @@ static const struct b2056_inittab_entry b2056_inittab_rev6_rx[] = {
        [B2056_RX_STATUS_HPC_RC]        = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
 };
 
-static const struct b2056_inittab_entry b2056_inittab_rev7_syn[] = {
+static const struct b2056_inittab_entry b2056_inittab_radio_rev7_9_syn[] = {
        [B2056_SYN_RESERVED_ADDR2]      = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
        [B2056_SYN_RESERVED_ADDR3]      = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
        [B2056_SYN_RESERVED_ADDR4]      = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
@@ -2176,7 +2176,7 @@ static const struct b2056_inittab_entry b2056_inittab_rev7_syn[] = {
        [B2056_SYN_LOGEN_TX_CMOS_VALID] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
 };
 
-static const struct b2056_inittab_entry b2056_inittab_rev7_tx[] = {
+static const struct b2056_inittab_entry b2056_inittab_radio_rev7_9_tx[] = {
        [B2056_TX_RESERVED_ADDR2]       = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
        [B2056_TX_RESERVED_ADDR3]       = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
        [B2056_TX_RESERVED_ADDR4]       = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
@@ -2332,7 +2332,7 @@ static const struct b2056_inittab_entry b2056_inittab_rev7_tx[] = {
        [B2056_TX_GMBB_IDAC7]           = { .ghz5 = 0x0075, .ghz2 = 0x0075, UPLOAD, },
 };
 
-static const struct b2056_inittab_entry b2056_inittab_rev7_rx[] = {
+static const struct b2056_inittab_entry b2056_inittab_radio_rev7_9_rx[] = {
        [B2056_RX_RESERVED_ADDR2]       = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
        [B2056_RX_RESERVED_ADDR3]       = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
        [B2056_RX_RESERVED_ADDR4]       = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
@@ -2482,7 +2482,7 @@ static const struct b2056_inittab_entry b2056_inittab_rev7_rx[] = {
        [B2056_RX_STATUS_HPC_RC]        = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
 };
 
-static const struct b2056_inittab_entry b2056_inittab_rev8_syn[] = {
+static const struct b2056_inittab_entry b2056_inittab_radio_rev8_syn[] = {
        [B2056_SYN_RESERVED_ADDR2]      = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
        [B2056_SYN_RESERVED_ADDR3]      = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
        [B2056_SYN_RESERVED_ADDR4]      = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
@@ -2666,7 +2666,7 @@ static const struct b2056_inittab_entry b2056_inittab_rev8_syn[] = {
        [B2056_SYN_LOGEN_TX_CMOS_VALID] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
 };
 
-static const struct b2056_inittab_entry b2056_inittab_rev8_tx[] = {
+static const struct b2056_inittab_entry b2056_inittab_radio_rev8_tx[] = {
        [B2056_TX_RESERVED_ADDR2]       = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
        [B2056_TX_RESERVED_ADDR3]       = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
        [B2056_TX_RESERVED_ADDR4]       = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
@@ -2822,7 +2822,7 @@ static const struct b2056_inittab_entry b2056_inittab_rev8_tx[] = {
        [B2056_TX_GMBB_IDAC7]           = { .ghz5 = 0x0070, .ghz2 = 0x0070, NOUPLOAD, },
 };
 
-static const struct b2056_inittab_entry b2056_inittab_rev8_rx[] = {
+static const struct b2056_inittab_entry b2056_inittab_radio_rev8_rx[] = {
        [B2056_RX_RESERVED_ADDR2]       = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
        [B2056_RX_RESERVED_ADDR3]       = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
        [B2056_RX_RESERVED_ADDR4]       = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
@@ -2972,24 +2972,69 @@ static const struct b2056_inittab_entry b2056_inittab_rev8_rx[] = {
        [B2056_RX_STATUS_HPC_RC]        = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
 };
 
-#define INITTABSPTS(prefix) \
-       .syn            = prefix##_syn,                 \
-       .syn_length     = ARRAY_SIZE(prefix##_syn),     \
-       .tx             = prefix##_tx,                  \
-       .tx_length      = ARRAY_SIZE(prefix##_tx),      \
-       .rx             = prefix##_rx,                  \
-       .rx_length      = ARRAY_SIZE(prefix##_rx)
+static const struct b2056_inittab_entry b2056_inittab_radio_rev11_syn[] = {
+       [B2056_SYN_PLL_PFD]             = { .ghz5 = 0x0006, .ghz2 = 0x0006, UPLOAD, },
+       [B2056_SYN_PLL_CP2]             = { .ghz5 = 0x003f, .ghz2 = 0x003f, UPLOAD, },
+       [B2056_SYN_PLL_LOOPFILTER1]     = { .ghz5 = 0x0006, .ghz2 = 0x0006, UPLOAD, },
+       [B2056_SYN_PLL_LOOPFILTER2]     = { .ghz5 = 0x0006, .ghz2 = 0x0006, UPLOAD, },
+       [B2056_SYN_PLL_LOOPFILTER4]     = { .ghz5 = 0x002b, .ghz2 = 0x002b, UPLOAD, },
+       [B2056_SYN_PLL_VCO2]            = { .ghz5 = 0x00f7, .ghz2 = 0x00f7, UPLOAD, },
+       [B2056_SYN_PLL_VCOCAL12]        = { .ghz5 = 0x0007, .ghz2 = 0x0007, UPLOAD, },
+       [B2056_SYN_LOGENBUF2]           = { .ghz5 = 0x008f, .ghz2 = 0x008f, UPLOAD, },
+};
 
-static const struct b2056_inittabs_pts b2056_inittabs[] = {
-       [3] = { INITTABSPTS(b2056_inittab_rev3) },
-       [4] = { INITTABSPTS(b2056_inittab_rev4) },
-       [5] = { INITTABSPTS(b2056_inittab_rev5) },
-       [6] = { INITTABSPTS(b2056_inittab_rev6) },
-       [7] = { INITTABSPTS(b2056_inittab_rev7) },
-       [8] = { INITTABSPTS(b2056_inittab_rev8) },
-       [9] = { INITTABSPTS(b2056_inittab_rev7) },
+static const struct b2056_inittab_entry b2056_inittab_radio_rev11_tx[] = {
+       [B2056_TX_PA_SPARE2]            = { .ghz5 = 0x00ee, .ghz2 = 0x00ee, UPLOAD, },
+       [B2056_TX_INTPAA_IAUX_STAT]     = { .ghz5 = 0x0050, .ghz2 = 0x0050, UPLOAD, },
+       [B2056_TX_INTPAA_IMAIN_STAT]    = { .ghz5 = 0x0050, .ghz2 = 0x0050, UPLOAD, },
+       [B2056_TX_INTPAA_PASLOPE]       = { .ghz5 = 0x00f0, .ghz2 = 0x00f0, UPLOAD, },
+       [B2056_TX_INTPAG_PASLOPE]       = { .ghz5 = 0x00f0, .ghz2 = 0x00f0, UPLOAD, },
+       [B2056_TX_PADA_IDAC]            = { .ghz5 = 0x00ff, .ghz2 = 0x00ff, UPLOAD, },
+       [B2056_TX_PADA_SLOPE]           = { .ghz5 = 0x0070, .ghz2 = 0x0070, UPLOAD, },
+       [B2056_TX_PADG_SLOPE]           = { .ghz5 = 0x0070, .ghz2 = 0x0070, UPLOAD, },
+       [B2056_TX_PGAA_IDAC]            = { .ghz5 = 0x00ff, .ghz2 = 0x00ff, UPLOAD, },
+       [B2056_TX_PGAA_SLOPE]           = { .ghz5 = 0x0077, .ghz2 = 0x0077, UPLOAD, },
+       [B2056_TX_PGAG_SLOPE]           = { .ghz5 = 0x0077, .ghz2 = 0x0077, UPLOAD, },
+       [B2056_TX_GMBB_IDAC]            = { .ghz5 = 0x0000, .ghz2 = 0x0000, UPLOAD, },
+       [B2056_TX_TXSPARE1]             = { .ghz5 = 0x0030, .ghz2 = 0x0030, UPLOAD, },
+};
+
+static const struct b2056_inittab_entry b2056_inittab_radio_rev11_rx[] = {
+       [B2056_RX_BIASPOLE_LNAA1_IDAC]  = { .ghz5 = 0x0017, .ghz2 = 0x0017, UPLOAD, },
+       [B2056_RX_LNAA2_IDAC]           = { .ghz5 = 0x00ff, .ghz2 = 0x00ff, UPLOAD, },
+       [B2056_RX_BIASPOLE_LNAG1_IDAC]  = { .ghz5 = 0x0017, .ghz2 = 0x0017, UPLOAD, },
+       [B2056_RX_LNAG2_IDAC]           = { .ghz5 = 0x00f0, .ghz2 = 0x00f0, UPLOAD, },
+       [B2056_RX_MIXA_VCM]             = { .ghz5 = 0x0055, .ghz2 = 0x0055, UPLOAD, },
+       [B2056_RX_MIXA_LOB_BIAS]        = { .ghz5 = 0x0088, .ghz2 = 0x0088, UPLOAD, },
+       [B2056_RX_MIXA_BIAS_AUX]        = { .ghz5 = 0x0007, .ghz2 = 0x0007, UPLOAD, },
+       [B2056_RX_MIXG_VCM]             = { .ghz5 = 0x0055, .ghz2 = 0x0055, UPLOAD, },
+       [B2056_RX_TIA_IOPAMP]           = { .ghz5 = 0x0026, .ghz2 = 0x0026, UPLOAD, },
+       [B2056_RX_TIA_QOPAMP]           = { .ghz5 = 0x0026, .ghz2 = 0x0026, UPLOAD, },
+       [B2056_RX_TIA_IMISC]            = { .ghz5 = 0x000f, .ghz2 = 0x000f, UPLOAD, },
+       [B2056_RX_TIA_QMISC]            = { .ghz5 = 0x000f, .ghz2 = 0x000f, UPLOAD, },
+       [B2056_RX_RXLPF_OUTVCM]         = { .ghz5 = 0x0004, .ghz2 = 0x0004, UPLOAD, },
+       [B2056_RX_VGA_BIAS_DCCANCEL]    = { .ghz5 = 0x0000, .ghz2 = 0x0000, UPLOAD, },
+       [B2056_RX_RXSPARE3]             = { .ghz5 = 0x0005, .ghz2 = 0x0005, UPLOAD, },
 };
 
+#define INITTABSPTS(prefix) \
+       static const struct b2056_inittabs_pts prefix = {       \
+               .syn            = prefix##_syn,                 \
+               .syn_length     = ARRAY_SIZE(prefix##_syn),     \
+               .tx             = prefix##_tx,                  \
+               .tx_length      = ARRAY_SIZE(prefix##_tx),      \
+               .rx             = prefix##_rx,                  \
+               .rx_length      = ARRAY_SIZE(prefix##_rx),      \
+       }
+
+INITTABSPTS(b2056_inittab_phy_rev3);
+INITTABSPTS(b2056_inittab_phy_rev4);
+INITTABSPTS(b2056_inittab_radio_rev5);
+INITTABSPTS(b2056_inittab_radio_rev6);
+INITTABSPTS(b2056_inittab_radio_rev7_9);
+INITTABSPTS(b2056_inittab_radio_rev8);
+INITTABSPTS(b2056_inittab_radio_rev11);
+
 #define RADIOREGS3(r00, r01, r02, r03, r04, r05, r06, r07, r08, r09, \
                   r10, r11, r12, r13, r14, r15, r16, r17, r18, r19, \
                   r20, r21, r22, r23, r24, r25, r26, r27, r28, r29, \
@@ -3041,7 +3086,7 @@ static const struct b2056_inittabs_pts b2056_inittabs[] = {
        .phy_regs.phy_bw6       = r5
 
 /* http://bcm-v4.sipsolutions.net/802.11/Radio/2056/ChannelTable */
-static const struct b43_nphy_channeltab_entry_rev3 b43_nphy_channeltab_rev3[] = {
+static const struct b43_nphy_channeltab_entry_rev3 b43_nphy_channeltab_phy_rev3[] = {
   {    .freq                   = 4920,
        RADIOREGS3(0xff, 0x01, 0x01, 0x01, 0xec, 0x05, 0x05, 0x04,
                   0x0c, 0x01, 0x00, 0x00, 0x00, 0x8f, 0x0f, 0x00,
@@ -4036,7 +4081,7 @@ static const struct b43_nphy_channeltab_entry_rev3 b43_nphy_channeltab_rev3[] =
   },
 };
 
-static const struct b43_nphy_channeltab_entry_rev3 b43_nphy_channeltab_rev4[] = {
+static const struct b43_nphy_channeltab_entry_rev3 b43_nphy_channeltab_phy_rev4[] = {
   {    .freq                   = 4920,
        RADIOREGS3(0xff, 0x01, 0x01, 0x01, 0xec, 0x05, 0x05, 0x04,
                   0x0c, 0x01, 0x00, 0x00, 0x00, 0x8f, 0x0f, 0x00,
@@ -5031,7 +5076,7 @@ static const struct b43_nphy_channeltab_entry_rev3 b43_nphy_channeltab_rev4[] =
   },
 };
 
-static const struct b43_nphy_channeltab_entry_rev3 b43_nphy_channeltab_rev5[] = {
+static const struct b43_nphy_channeltab_entry_rev3 b43_nphy_channeltab_radio_rev5[] = {
   {    .freq                   = 4920,
        RADIOREGS3(0xff, 0x01, 0x01, 0x01, 0xec, 0x05, 0x05, 0x04,
                   0x0c, 0x01, 0x00, 0x00, 0x00, 0x8f, 0x0f, 0x00,
@@ -6026,7 +6071,7 @@ static const struct b43_nphy_channeltab_entry_rev3 b43_nphy_channeltab_rev5[] =
   },
 };
 
-static const struct b43_nphy_channeltab_entry_rev3 b43_nphy_channeltab_rev6[] = {
+static const struct b43_nphy_channeltab_entry_rev3 b43_nphy_channeltab_radio_rev6[] = {
   {    .freq                   = 4920,
        RADIOREGS3(0xff, 0x01, 0x01, 0x01, 0xec, 0x05, 0x05, 0x04,
                   0x0c, 0x01, 0x00, 0x00, 0x00, 0x8f, 0x0f, 0x00,
@@ -7021,7 +7066,7 @@ static const struct b43_nphy_channeltab_entry_rev3 b43_nphy_channeltab_rev6[] =
   },
 };
 
-static const struct b43_nphy_channeltab_entry_rev3 b43_nphy_channeltab_rev7_9[] = {
+static const struct b43_nphy_channeltab_entry_rev3 b43_nphy_channeltab_radio_rev7_9[] = {
   {    .freq                   = 4920,
        RADIOREGS3(0xff, 0x01, 0x01, 0x01, 0xec, 0x05, 0x05, 0x04,
                   0x0c, 0x01, 0x00, 0x00, 0x00, 0x8f, 0x0f, 0x00,
@@ -8016,7 +8061,7 @@ static const struct b43_nphy_channeltab_entry_rev3 b43_nphy_channeltab_rev7_9[]
   },
 };
 
-static const struct b43_nphy_channeltab_entry_rev3 b43_nphy_channeltab_rev8[] = {
+static const struct b43_nphy_channeltab_entry_rev3 b43_nphy_channeltab_radio_rev8[] = {
   {    .freq                   = 4920,
        RADIOREGS3(0xff, 0x01, 0x01, 0x01, 0xec, 0x05, 0x05, 0x04,
                   0x0c, 0x01, 0x00, 0x00, 0x00, 0x8f, 0x0f, 0x00,
@@ -9011,6 +9056,236 @@ static const struct b43_nphy_channeltab_entry_rev3 b43_nphy_channeltab_rev8[] =
   },
 };
 
+static const struct b43_nphy_channeltab_entry_rev3 b43_nphy_channeltab_radio_rev11[] = {
+       {
+               .freq                   = 5180,
+               RADIOREGS3(0xb6, 0x01, 0x01, 0x02, 0x06, 0x05, 0x05, 0x02,
+                          0x0c, 0x01, 0x02, 0x02, 0x02, 0x8f, 0x0f, 0x00,
+                          0xff, 0xf9, 0x00, 0x06, 0x00, 0x77, 0x00, 0x0e,
+                          0x00, 0x6f, 0x00, 0xf9, 0x00, 0x06, 0x00, 0x77,
+                          0x00, 0x0e, 0x00, 0x6f, 0x00),
+               PHYREGS(0x081c, 0x0818, 0x0814, 0x01f9, 0x01fa, 0x01fb),
+       },
+       {
+               .freq                   = 5200,
+               RADIOREGS3(0xaf, 0x01, 0x01, 0x02, 0x08, 0x05, 0x05, 0x02,
+                          0x0c, 0x01, 0x02, 0x02, 0x02, 0x8f, 0x0f, 0x00,
+                          0xff, 0xf9, 0x00, 0x05, 0x00, 0x77, 0x00, 0x0d,
+                          0x00, 0x6f, 0x00, 0xf9, 0x00, 0x05, 0x00, 0x77,
+                          0x00, 0x0d, 0x00, 0x6f, 0x00),
+               PHYREGS(0x0824, 0x0820, 0x081c, 0x01f7, 0x01f8, 0x01f9),
+       },
+       {
+               .freq                   = 5220,
+               RADIOREGS3(0xa7, 0x01, 0x01, 0x02, 0x0a, 0x05, 0x05, 0x02,
+                          0x0c, 0x01, 0x02, 0x02, 0x02, 0x8e, 0x0f, 0x00,
+                          0xfe, 0xd8, 0x00, 0x05, 0x00, 0x77, 0x00, 0x0d,
+                          0x00, 0x6f, 0x00, 0xd8, 0x00, 0x05, 0x00, 0x77,
+                          0x00, 0x0d, 0x00, 0x6f, 0x00),
+               PHYREGS(0x082c, 0x0828, 0x0824, 0x01f5, 0x01f6, 0x01f7),
+       },
+       {
+               .freq                   = 5745,
+               RADIOREGS3(0xfe, 0x00, 0x02, 0x04, 0x7d, 0x05, 0x05, 0x02,
+                          0x15, 0x01, 0x05, 0x05, 0x05, 0x87, 0x05, 0x00,
+                          0x20, 0x30, 0x00, 0x00, 0x00, 0x77, 0x00, 0x06,
+                          0x00, 0x6d, 0x00, 0x30, 0x00, 0x00, 0x00, 0x77,
+                          0x00, 0x06, 0x00, 0x6d, 0x00),
+               PHYREGS(0x08fe, 0x08fa, 0x08f6, 0x01c8, 0x01c8, 0x01c9),
+       },
+       {
+               .freq                   = 5765,
+               RADIOREGS3(0xf8, 0x00, 0x02, 0x04, 0x81, 0x05, 0x05, 0x02,
+                          0x15, 0x01, 0x05, 0x05, 0x05, 0x86, 0x05, 0x00,
+                          0x10, 0x10, 0x00, 0x00, 0x00, 0x77, 0x00, 0x05,
+                          0x00, 0x6c, 0x00, 0x10, 0x00, 0x00, 0x00, 0x77,
+                          0x00, 0x05, 0x00, 0x6c, 0x00),
+               PHYREGS(0x0906, 0x0902, 0x08fe, 0x01c6, 0x01c7, 0x01c8),
+       },
+       {
+               .freq                   = 5785,
+               RADIOREGS3(0xf2, 0x00, 0x02, 0x04, 0x85, 0x05, 0x05, 0x02,
+                          0x15, 0x01, 0x06, 0x06, 0x06, 0x86, 0x04, 0x00,
+                          0x00, 0x10, 0x00, 0x00, 0x00, 0x77, 0x00, 0x05,
+                          0x00, 0x6b, 0x00, 0x10, 0x00, 0x00, 0x00, 0x77,
+                          0x00, 0x05, 0x00, 0x6b, 0x00),
+               PHYREGS(0x090e, 0x090a, 0x0906, 0x01c4, 0x01c5, 0x01c6),
+       },
+       {
+               .freq                   = 5805,
+               RADIOREGS3(0xed, 0x00, 0x02, 0x04, 0x89, 0x05, 0x05, 0x02,
+                          0x15, 0x01, 0x06, 0x06, 0x06, 0x86, 0x04, 0x00,
+                          0x00, 0x00, 0x00, 0x00, 0x00, 0x77, 0x00, 0x05,
+                          0x00, 0x6a, 0x00, 0x00, 0x00, 0x00, 0x00, 0x77,
+                          0x00, 0x05, 0x00, 0x6a, 0x00),
+               PHYREGS(0x0916, 0x0912, 0x090e, 0x01c3, 0x01c4, 0x01c4),
+       },
+       {
+               .freq                   = 5825,
+               RADIOREGS3(0xed, 0x00, 0x02, 0x04, 0x8d, 0x05, 0x05, 0x02,
+                          0x15, 0x01, 0x06, 0x06, 0x06, 0x86, 0x04, 0x00,
+                          0x00, 0x00, 0x00, 0x00, 0x00, 0x77, 0x00, 0x05,
+                          0x00, 0x69, 0x00, 0x00, 0x00, 0x00, 0x00, 0x77,
+                          0x00, 0x05, 0x00, 0x69, 0x00),
+               PHYREGS(0x091e, 0x091a, 0x0916, 0x01c1, 0x01c2, 0x01c3),
+       },
+       {
+               .freq                   = 2412,
+               RADIOREGS3(0x00, 0x01, 0x03, 0x09, 0x6c, 0x06, 0x06, 0x04,
+                          0x2b, 0x01, 0x04, 0x04, 0x04, 0x8f, 0x30, 0x00,
+                          0x00, 0x00, 0x78, 0x00, 0x03, 0x00, 0x70, 0x00,
+                          0x0b, 0x00, 0x0a, 0x00, 0x89, 0x00, 0x03, 0x00,
+                          0x70, 0x00, 0x0b, 0x00, 0x0a),
+               PHYREGS(0x03c9, 0x03c5, 0x03c1, 0x043a, 0x043f, 0x0443),
+       },
+       {
+               .freq                   = 2417,
+               RADIOREGS3(0x00, 0x01, 0x03, 0x09, 0x71, 0x06, 0x06, 0x04,
+                          0x2b, 0x01, 0x05, 0x05, 0x05, 0x8f, 0x30, 0x00,
+                          0x00, 0x00, 0x78, 0x00, 0x03, 0x00, 0x70, 0x00,
+                          0x0b, 0x00, 0x0a, 0x00, 0x89, 0x00, 0x03, 0x00,
+                          0x70, 0x00, 0x0b, 0x00, 0x0a),
+               PHYREGS(0x03cb, 0x03c7, 0x03c3, 0x0438, 0x043d, 0x0441),
+       },
+       {
+               .freq                   = 2422,
+               RADIOREGS3(0x00, 0x01, 0x03, 0x09, 0x76, 0x06, 0x06, 0x04,
+                          0x2b, 0x01, 0x05, 0x05, 0x05, 0x8f, 0x30, 0x00,
+                          0x00, 0x00, 0x67, 0x00, 0x03, 0x00, 0x70, 0x00,
+                          0x0b, 0x00, 0x0a, 0x00, 0x89, 0x00, 0x03, 0x00,
+                          0x70, 0x00, 0x0b, 0x00, 0x0a),
+               PHYREGS(0x03cd, 0x03c9, 0x03c5, 0x0436, 0x043a, 0x043f),
+       },
+       {
+               .freq                   = 2427,
+               RADIOREGS3(0x00, 0x01, 0x03, 0x09, 0x7b, 0x06, 0x06, 0x04,
+                          0x2b, 0x01, 0x05, 0x05, 0x05, 0x8f, 0x30, 0x00,
+                          0x00, 0x00, 0x57, 0x00, 0x03, 0x00, 0x70, 0x00,
+                          0x0a, 0x00, 0x0a, 0x00, 0x78, 0x00, 0x03, 0x00,
+                          0x70, 0x00, 0x0a, 0x00, 0x0a),
+               PHYREGS(0x03cf, 0x03cb, 0x03c7, 0x0434, 0x0438, 0x043d),
+       },
+       {
+               .freq                   = 2432,
+               RADIOREGS3(0x00, 0x01, 0x03, 0x09, 0x80, 0x06, 0x06, 0x04,
+                          0x2b, 0x01, 0x05, 0x05, 0x05, 0x8f, 0x30, 0x00,
+                          0x00, 0x00, 0x56, 0x00, 0x03, 0x00, 0x70, 0x00,
+                          0x0a, 0x00, 0x0a, 0x00, 0x77, 0x00, 0x03, 0x00,
+                          0x70, 0x00, 0x0a, 0x00, 0x0a),
+               PHYREGS(0x03d1, 0x03cd, 0x03c9, 0x0431, 0x0436, 0x043a),
+       },
+       {
+               .freq                   = 2437,
+               RADIOREGS3(0x00, 0x01, 0x03, 0x09, 0x85, 0x06, 0x06, 0x04,
+                          0x2b, 0x01, 0x05, 0x05, 0x05, 0x8f, 0x30, 0x00,
+                          0x00, 0x00, 0x46, 0x00, 0x03, 0x00, 0x70, 0x00,
+                          0x0a, 0x00, 0x0a, 0x00, 0x76, 0x00, 0x03, 0x00,
+                          0x70, 0x00, 0x0a, 0x00, 0x0a),
+               PHYREGS(0x03d3, 0x03cf, 0x03cb, 0x042f, 0x0434, 0x0438),
+       },
+       {
+               .freq                   = 2442,
+               RADIOREGS3(0x00, 0x01, 0x03, 0x09, 0x8a, 0x06, 0x06, 0x04,
+                          0x2b, 0x01, 0x05, 0x05, 0x05, 0x8f, 0x30, 0x00,
+                          0x00, 0x00, 0x45, 0x00, 0x02, 0x00, 0x70, 0x00,
+                          0x0a, 0x00, 0x0a, 0x00, 0x66, 0x00, 0x02, 0x00,
+                          0x70, 0x00, 0x0a, 0x00, 0x0a),
+               PHYREGS(0x03d5, 0x03d1, 0x03cd, 0x042d, 0x0431, 0x0436),
+       },
+       {
+               .freq                   = 2447,
+               RADIOREGS3(0x00, 0x01, 0x03, 0x09, 0x8f, 0x06, 0x06, 0x04,
+                          0x2b, 0x01, 0x06, 0x06, 0x06, 0x8f, 0x30, 0x00,
+                          0x00, 0x00, 0x34, 0x00, 0x02, 0x00, 0x70, 0x00,
+                          0x0a, 0x00, 0x09, 0x00, 0x55, 0x00, 0x02, 0x00,
+                          0x70, 0x00, 0x0a, 0x00, 0x09),
+               PHYREGS(0x03d7, 0x03d3, 0x03cf, 0x042b, 0x042f, 0x0434),
+       },
+       {
+               .freq                   = 2452,
+               RADIOREGS3(0x00, 0x01, 0x03, 0x09, 0x94, 0x06, 0x06, 0x04,
+                          0x2b, 0x01, 0x06, 0x06, 0x06, 0x8f, 0x30, 0x00,
+                          0x00, 0x00, 0x23, 0x00, 0x02, 0x00, 0x70, 0x00,
+                          0x0a, 0x00, 0x09, 0x00, 0x45, 0x00, 0x02, 0x00,
+                          0x70, 0x00, 0x0a, 0x00, 0x09),
+               PHYREGS(0x03d9, 0x03d5, 0x03d1, 0x0429, 0x042d, 0x0431),
+       },
+       {
+               .freq                   = 2457,
+               RADIOREGS3(0x00, 0x01, 0x03, 0x09, 0x99, 0x06, 0x06, 0x04,
+                          0x2b, 0x01, 0x06, 0x06, 0x06, 0x8f, 0x30, 0x00,
+                          0x00, 0x00, 0x12, 0x00, 0x02, 0x00, 0x70, 0x00,
+                          0x0a, 0x00, 0x09, 0x00, 0x34, 0x00, 0x02, 0x00,
+                          0x70, 0x00, 0x0a, 0x00, 0x09),
+               PHYREGS(0x03db, 0x03d7, 0x03d3, 0x0427, 0x042b, 0x042f),
+       },
+       {
+               .freq                   = 2462,
+               RADIOREGS3(0x00, 0x01, 0x03, 0x09, 0x9e, 0x06, 0x06, 0x04,
+                          0x2b, 0x01, 0x06, 0x06, 0x06, 0x8f, 0x30, 0x00,
+                          0x00, 0x00, 0x02, 0x00, 0x02, 0x00, 0x70, 0x00,
+                          0x09, 0x00, 0x09, 0x00, 0x33, 0x00, 0x02, 0x00,
+                          0x70, 0x00, 0x09, 0x00, 0x09),
+               PHYREGS(0x03dd, 0x03d9, 0x03d5, 0x0424, 0x0429, 0x042d),
+       },
+       {
+               .freq                   = 2467,
+               RADIOREGS3(0x00, 0x01, 0x03, 0x09, 0xa3, 0x06, 0x06, 0x04,
+                          0x2b, 0x01, 0x06, 0x06, 0x06, 0x8f, 0x30, 0x00,
+                          0x00, 0x00, 0x01, 0x00, 0x02, 0x00, 0x70, 0x00,
+                          0x09, 0x00, 0x09, 0x00, 0x22, 0x00, 0x02, 0x00,
+                          0x70, 0x00, 0x09, 0x00, 0x09),
+               PHYREGS(0x03df, 0x03db, 0x03d7, 0x0422, 0x0427, 0x042b),
+       },
+       {
+               .freq                   = 2472,
+               RADIOREGS3(0x00, 0x01, 0x03, 0x09, 0xa8, 0x06, 0x06, 0x04,
+                          0x2b, 0x01, 0x07, 0x07, 0x07, 0x8f, 0x30, 0x00,
+                          0x00, 0x00, 0x01, 0x00, 0x02, 0x00, 0x70, 0x00,
+                          0x09, 0x00, 0x09, 0x00, 0x11, 0x00, 0x02, 0x00,
+                          0x70, 0x00, 0x09, 0x00, 0x09),
+               PHYREGS(0x03e1, 0x03dd, 0x03d9, 0x0420, 0x0424, 0x0429),
+       },
+       {
+               .freq                   = 2484,
+               RADIOREGS3(0xff, 0x01, 0x03, 0x09, 0xb4, 0x06, 0x06, 0x04,
+                          0x2b, 0x01, 0x07, 0x07, 0x07, 0x8f, 0x20, 0x00,
+                          0x00, 0x00, 0x00, 0x00, 0x02, 0x00, 0x70, 0x00,
+                          0x09, 0x00, 0x09, 0x00, 0x00, 0x00, 0x02, 0x00,
+                          0x70, 0x00, 0x09, 0x00, 0x09),
+               PHYREGS(0x03e6, 0x03e2, 0x03de, 0x041b, 0x041f, 0x0424),
+       },
+};
+
+static const struct b2056_inittabs_pts
+*b43_nphy_get_inittabs_rev3(struct b43_wldev *dev)
+{
+       struct b43_phy *phy = &dev->phy;
+
+       switch (dev->phy.rev) {
+       case 3:
+               return &b2056_inittab_phy_rev3;
+       case 4:
+               return &b2056_inittab_phy_rev4;
+       default:
+               switch (phy->radio_rev) {
+               case 5:
+                       return &b2056_inittab_radio_rev5;
+               case 6:
+                       return &b2056_inittab_radio_rev6;
+               case 7:
+               case 9:
+                       return &b2056_inittab_radio_rev7_9;
+               case 8:
+                       return &b2056_inittab_radio_rev8;
+               case 11:
+                       return &b2056_inittab_radio_rev11;
+               }
+       }
+
+       return NULL;
+}
+
 static void b2056_upload_inittab(struct b43_wldev *dev, bool ghz5,
                                 bool ignore_uploadflag, u16 routing,
                                 const struct b2056_inittab_entry *e,
@@ -9037,11 +9312,11 @@ void b2056_upload_inittabs(struct b43_wldev *dev,
 {
        const struct b2056_inittabs_pts *pts;
 
-       if (dev->phy.rev >= ARRAY_SIZE(b2056_inittabs)) {
+       pts = b43_nphy_get_inittabs_rev3(dev);
+       if (!pts) {
                B43_WARN_ON(1);
                return;
        }
-       pts = &b2056_inittabs[dev->phy.rev];
 
        b2056_upload_inittab(dev, ghz5, ignore_uploadflag,
                                B2056_SYN, pts->syn, pts->syn_length);
@@ -9060,11 +9335,12 @@ void b2056_upload_syn_pll_cp2(struct b43_wldev *dev, bool ghz5)
        const struct b2056_inittabs_pts *pts;
        const struct b2056_inittab_entry *e;
 
-       if (dev->phy.rev >= ARRAY_SIZE(b2056_inittabs)) {
+       pts = b43_nphy_get_inittabs_rev3(dev);
+       if (!pts) {
                B43_WARN_ON(1);
                return;
        }
-       pts = &b2056_inittabs[dev->phy.rev];
+
        e = &pts->syn[B2056_SYN_PLL_CP2];
 
        b43_radio_write(dev, B2056_SYN_PLL_CP2, ghz5 ? e->ghz5 : e->ghz2);
@@ -9073,38 +9349,46 @@ void b2056_upload_syn_pll_cp2(struct b43_wldev *dev, bool ghz5)
 const struct b43_nphy_channeltab_entry_rev3 *
 b43_nphy_get_chantabent_rev3(struct b43_wldev *dev, u16 freq)
 {
+       struct b43_phy *phy = &dev->phy;
        const struct b43_nphy_channeltab_entry_rev3 *e;
        unsigned int length, i;
 
-       switch (dev->phy.rev) {
+       switch (phy->rev) {
        case 3:
-               e = b43_nphy_channeltab_rev3;
-               length = ARRAY_SIZE(b43_nphy_channeltab_rev3);
+               e = b43_nphy_channeltab_phy_rev3;
+               length = ARRAY_SIZE(b43_nphy_channeltab_phy_rev3);
                break;
        case 4:
-               e = b43_nphy_channeltab_rev4;
-               length = ARRAY_SIZE(b43_nphy_channeltab_rev4);
-               break;
-       case 5:
-               e = b43_nphy_channeltab_rev5;
-               length = ARRAY_SIZE(b43_nphy_channeltab_rev5);
-               break;
-       case 6:
-               e = b43_nphy_channeltab_rev6;
-               length = ARRAY_SIZE(b43_nphy_channeltab_rev6);
-               break;
-       case 7:
-       case 9:
-               e = b43_nphy_channeltab_rev7_9;
-               length = ARRAY_SIZE(b43_nphy_channeltab_rev7_9);
-               break;
-       case 8:
-               e = b43_nphy_channeltab_rev8;
-               length = ARRAY_SIZE(b43_nphy_channeltab_rev8);
+               e = b43_nphy_channeltab_phy_rev4;
+               length = ARRAY_SIZE(b43_nphy_channeltab_phy_rev4);
                break;
        default:
-               B43_WARN_ON(1);
-               return NULL;
+               switch (phy->radio_rev) {
+               case 5:
+                       e = b43_nphy_channeltab_radio_rev5;
+                       length = ARRAY_SIZE(b43_nphy_channeltab_radio_rev5);
+                       break;
+               case 6:
+                       e = b43_nphy_channeltab_radio_rev6;
+                       length = ARRAY_SIZE(b43_nphy_channeltab_radio_rev6);
+                       break;
+               case 7:
+               case 9:
+                       e = b43_nphy_channeltab_radio_rev7_9;
+                       length = ARRAY_SIZE(b43_nphy_channeltab_radio_rev7_9);
+                       break;
+               case 8:
+                       e = b43_nphy_channeltab_radio_rev8;
+                       length = ARRAY_SIZE(b43_nphy_channeltab_radio_rev8);
+                       break;
+               case 11:
+                       e = b43_nphy_channeltab_radio_rev11;
+                       length = ARRAY_SIZE(b43_nphy_channeltab_radio_rev11);
+                       break;
+               default:
+                       B43_WARN_ON(1);
+                       return NULL;
+               }
        }
 
        for (i = 0; i < length; i++, e++) {
index 94c755fdda14749eaa36716032283b8cb9fea7d2..50d03ffeac8c57337b090f467899648d5be2f607 100644 (file)
@@ -1627,74 +1627,7 @@ static const u32 b43_ntab_tdtrn_r3[] = {
        0xfa58fc00, 0x0b64fc7e, 0x0800f7b6, 0x00f006be,
 };
 
-static const u32 b43_ntab_noisevar0_r3[] = {
-       0x02110211, 0x0000014d, 0x02110211, 0x0000014d,
-       0x02110211, 0x0000014d, 0x02110211, 0x0000014d,
-       0x02110211, 0x0000014d, 0x02110211, 0x0000014d,
-       0x02110211, 0x0000014d, 0x02110211, 0x0000014d,
-       0x02110211, 0x0000014d, 0x02110211, 0x0000014d,
-       0x02110211, 0x0000014d, 0x02110211, 0x0000014d,
-       0x02110211, 0x0000014d, 0x02110211, 0x0000014d,
-       0x02110211, 0x0000014d, 0x02110211, 0x0000014d,
-       0x02110211, 0x0000014d, 0x02110211, 0x0000014d,
-       0x02110211, 0x0000014d, 0x02110211, 0x0000014d,
-       0x02110211, 0x0000014d, 0x02110211, 0x0000014d,
-       0x02110211, 0x0000014d, 0x02110211, 0x0000014d,
-       0x02110211, 0x0000014d, 0x02110211, 0x0000014d,
-       0x02110211, 0x0000014d, 0x02110211, 0x0000014d,
-       0x02110211, 0x0000014d, 0x02110211, 0x0000014d,
-       0x02110211, 0x0000014d, 0x02110211, 0x0000014d,
-       0x02110211, 0x0000014d, 0x02110211, 0x0000014d,
-       0x02110211, 0x0000014d, 0x02110211, 0x0000014d,
-       0x02110211, 0x0000014d, 0x02110211, 0x0000014d,
-       0x02110211, 0x0000014d, 0x02110211, 0x0000014d,
-       0x02110211, 0x0000014d, 0x02110211, 0x0000014d,
-       0x02110211, 0x0000014d, 0x02110211, 0x0000014d,
-       0x02110211, 0x0000014d, 0x02110211, 0x0000014d,
-       0x02110211, 0x0000014d, 0x02110211, 0x0000014d,
-       0x02110211, 0x0000014d, 0x02110211, 0x0000014d,
-       0x02110211, 0x0000014d, 0x02110211, 0x0000014d,
-       0x02110211, 0x0000014d, 0x02110211, 0x0000014d,
-       0x02110211, 0x0000014d, 0x02110211, 0x0000014d,
-       0x02110211, 0x0000014d, 0x02110211, 0x0000014d,
-       0x02110211, 0x0000014d, 0x02110211, 0x0000014d,
-       0x02110211, 0x0000014d, 0x02110211, 0x0000014d,
-       0x02110211, 0x0000014d, 0x02110211, 0x0000014d,
-       0x02110211, 0x0000014d, 0x02110211, 0x0000014d,
-       0x02110211, 0x0000014d, 0x02110211, 0x0000014d,
-       0x02110211, 0x0000014d, 0x02110211, 0x0000014d,
-       0x02110211, 0x0000014d, 0x02110211, 0x0000014d,
-       0x02110211, 0x0000014d, 0x02110211, 0x0000014d,
-       0x02110211, 0x0000014d, 0x02110211, 0x0000014d,
-       0x02110211, 0x0000014d, 0x02110211, 0x0000014d,
-       0x02110211, 0x0000014d, 0x02110211, 0x0000014d,
-       0x02110211, 0x0000014d, 0x02110211, 0x0000014d,
-       0x02110211, 0x0000014d, 0x02110211, 0x0000014d,
-       0x02110211, 0x0000014d, 0x02110211, 0x0000014d,
-       0x02110211, 0x0000014d, 0x02110211, 0x0000014d,
-       0x02110211, 0x0000014d, 0x02110211, 0x0000014d,
-       0x02110211, 0x0000014d, 0x02110211, 0x0000014d,
-       0x02110211, 0x0000014d, 0x02110211, 0x0000014d,
-       0x02110211, 0x0000014d, 0x02110211, 0x0000014d,
-       0x02110211, 0x0000014d, 0x02110211, 0x0000014d,
-       0x02110211, 0x0000014d, 0x02110211, 0x0000014d,
-       0x02110211, 0x0000014d, 0x02110211, 0x0000014d,
-       0x02110211, 0x0000014d, 0x02110211, 0x0000014d,
-       0x02110211, 0x0000014d, 0x02110211, 0x0000014d,
-       0x02110211, 0x0000014d, 0x02110211, 0x0000014d,
-       0x02110211, 0x0000014d, 0x02110211, 0x0000014d,
-       0x02110211, 0x0000014d, 0x02110211, 0x0000014d,
-       0x02110211, 0x0000014d, 0x02110211, 0x0000014d,
-       0x02110211, 0x0000014d, 0x02110211, 0x0000014d,
-       0x02110211, 0x0000014d, 0x02110211, 0x0000014d,
-       0x02110211, 0x0000014d, 0x02110211, 0x0000014d,
-       0x02110211, 0x0000014d, 0x02110211, 0x0000014d,
-       0x02110211, 0x0000014d, 0x02110211, 0x0000014d,
-       0x02110211, 0x0000014d, 0x02110211, 0x0000014d,
-       0x02110211, 0x0000014d, 0x02110211, 0x0000014d,
-};
-
-static const u32 b43_ntab_noisevar1_r3[] = {
+static const u32 b43_ntab_noisevar_r3[] = {
        0x02110211, 0x0000014d, 0x02110211, 0x0000014d,
        0x02110211, 0x0000014d, 0x02110211, 0x0000014d,
        0x02110211, 0x0000014d, 0x02110211, 0x0000014d,
@@ -3114,8 +3047,7 @@ static void b43_nphy_tables_init_rev3(struct b43_wldev *dev)
        ntab_upload(dev, B43_NTAB_TMAP_R3, b43_ntab_tmap_r3);
        ntab_upload(dev, B43_NTAB_INTLEVEL_R3, b43_ntab_intlevel_r3);
        ntab_upload(dev, B43_NTAB_TDTRN_R3, b43_ntab_tdtrn_r3);
-       ntab_upload(dev, B43_NTAB_NOISEVAR0_R3, b43_ntab_noisevar0_r3);
-       ntab_upload(dev, B43_NTAB_NOISEVAR1_R3, b43_ntab_noisevar1_r3);
+       ntab_upload(dev, B43_NTAB_NOISEVAR_R3, b43_ntab_noisevar_r3);
        ntab_upload(dev, B43_NTAB_MCS_R3, b43_ntab_mcs_r3);
        ntab_upload(dev, B43_NTAB_TDI20A0_R3, b43_ntab_tdi20a0_r3);
        ntab_upload(dev, B43_NTAB_TDI20A1_R3, b43_ntab_tdi20a1_r3);
index 9ff33adcff891cad9551bafab507924a59a54afb..3a58aee4c4cf714aa72bc22a8c85670b135eb934 100644 (file)
@@ -143,8 +143,7 @@ struct nphy_gain_ctl_workaround_entry *b43_nphy_get_gain_ctl_workaround_ent(
 #define B43_NTAB_TMAP_R3               B43_NTAB32(12,   0) /* TM AP  */
 #define B43_NTAB_INTLEVEL_R3           B43_NTAB32(13,   0) /* INT LV  */
 #define B43_NTAB_TDTRN_R3              B43_NTAB32(14,   0) /* TD TRN  */
-#define B43_NTAB_NOISEVAR0_R3          B43_NTAB32(16,   0) /* noise variance 0  */
-#define B43_NTAB_NOISEVAR1_R3          B43_NTAB32(16, 128) /* noise variance 1  */
+#define B43_NTAB_NOISEVAR_R3           B43_NTAB32(16,   0) /* noise variance */
 #define B43_NTAB_MCS_R3                        B43_NTAB16(18,   0) /* MCS  */
 #define B43_NTAB_TDI20A0_R3            B43_NTAB32(19, 128) /* TDI 20/0  */
 #define B43_NTAB_TDI20A1_R3            B43_NTAB32(19, 256) /* TDI 20/1  */
index 9b1a038be08b860da91461b82c6bd84dd5000531..c218c08fb2f5b15ad3233b475b5d275d2f4ba0df 100644 (file)
@@ -441,7 +441,7 @@ static void b43_wa_altagc(struct b43_wldev *dev)
 
 static void b43_wa_tr_ltov(struct b43_wldev *dev) /* TR Lookup Table Original Values */
 {
-       b43_gtab_write(dev, B43_GTAB_ORIGTR, 0, 0xC480);
+       b43_gtab_write(dev, B43_GTAB_ORIGTR, 0, 0x7654);
 }
 
 static void b43_wa_cpll_nonpilot(struct b43_wldev *dev)
index df130ef53d1c4054f59f2fe10d197a80673000c9..c7c9f15c0fe08170ee2f953e9690a13e605a3291 100644 (file)
@@ -303,10 +303,10 @@ static void brcmf_chip_ai_coredisable(struct brcmf_core_priv *core,
 
        ci = core->chip;
 
-       /* if core is already in reset, just return */
+       /* if core is already in reset, skip reset */
        regdata = ci->ops->read32(ci->ctx, core->wrapbase + BCMA_RESET_CTL);
        if ((regdata & BCMA_RESET_CTL_RESET) != 0)
-               return;
+               goto in_reset_configure;
 
        /* configure reset */
        ci->ops->write32(ci->ctx, core->wrapbase + BCMA_IOCTL,
@@ -322,6 +322,7 @@ static void brcmf_chip_ai_coredisable(struct brcmf_core_priv *core,
        SPINWAIT(ci->ops->read32(ci->ctx, core->wrapbase + BCMA_RESET_CTL) !=
                 BCMA_RESET_CTL_RESET, 300);
 
+in_reset_configure:
        /* in-reset configure */
        ci->ops->write32(ci->ctx, core->wrapbase + BCMA_IOCTL,
                         reset | BCMA_IOCTL_FGC | BCMA_IOCTL_CLK);
index 8c5fa4e581392d73d28ba29b790151be8f37f569..43c71bfaa4744fe4094069b28ff4d9084d6c679f 100644 (file)
@@ -897,7 +897,8 @@ static bool brcms_tx_flush_completed(struct brcms_info *wl)
        return result;
 }
 
-static void brcms_ops_flush(struct ieee80211_hw *hw, u32 queues, bool drop)
+static void brcms_ops_flush(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
+                           u32 queues, bool drop)
 {
        struct brcms_info *wl = hw->priv;
        int ret;
index 103f7bce893208c30eb692cca9aa9adf51b8d8b9..cd0cad7f775993661af9e8f4577c89976b15b811 100644 (file)
@@ -936,7 +936,8 @@ static int __cw1200_flush(struct cw1200_common *priv, bool drop)
        return ret;
 }
 
-void cw1200_flush(struct ieee80211_hw *hw, u32 queues, bool drop)
+void cw1200_flush(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
+                 u32 queues, bool drop)
 {
        struct cw1200_common *priv = hw->priv;
 
index 35babb62cc6a8b5a952f0a989c15ad4edf802f17..b7e386b7662b668b8299a9ab52f3c22f2633dfe5 100644 (file)
@@ -40,7 +40,8 @@ int cw1200_set_key(struct ieee80211_hw *dev, enum set_key_cmd cmd,
 
 int cw1200_set_rts_threshold(struct ieee80211_hw *hw, u32 value);
 
-void cw1200_flush(struct ieee80211_hw *hw, u32 queues, bool drop);
+void cw1200_flush(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
+                 u32 queues, bool drop);
 
 u64 cw1200_prepare_multicast(struct ieee80211_hw *hw,
                             struct netdev_hw_addr_list *mc_list);
index 67db34e56d7eb0765c5e75025e02470cae01da64..52919ad4272622aeb92d8b9d3d74daf1715a3d40 100644 (file)
@@ -882,7 +882,7 @@ void hostap_setup_dev(struct net_device *dev, local_info_t *local,
        dev->mtu = local->mtu;
 
 
-       SET_ETHTOOL_OPS(dev, &prism2_ethtool_ops);
+       dev->ethtool_ops = &prism2_ethtool_ops;
 
 }
 
index d37a6fd90d400a6dfd90d20cdf02d73e7e536d42..b598e2803500ec7b109111f5118dde1414547a22 100644 (file)
@@ -573,7 +573,7 @@ il3945_hdl_rx(struct il_priv *il, struct il_rx_buf *rxb)
                rx_status.flag |= RX_FLAG_SHORTPRE;
 
        if ((unlikely(rx_stats->phy_count > 20))) {
-               D_DROP("dsp size out of range [0,20]: %d/n",
+               D_DROP("dsp size out of range [0,20]: %d\n",
                       rx_stats->phy_count);
                return;
        }
index 888ad5c74639e351a3727c8b934a68f7969849e5..c159c05db6ef212b8b684c5726e61a8a97c62c39 100644 (file)
@@ -670,7 +670,7 @@ il4965_hdl_rx(struct il_priv *il, struct il_rx_buf *rxb)
        }
 
        if ((unlikely(phy_res->cfg_phy_cnt > 20))) {
-               D_DROP("dsp size out of range [0,20]: %d/n",
+               D_DROP("dsp size out of range [0,20]: %d\n",
                       phy_res->cfg_phy_cnt);
                return;
        }
index 4f42174d999412102e273744fc39ff692b9a9234..ecc674627e6e10b30a5a7b11ab3150c1ad42b37e 100644 (file)
@@ -4755,7 +4755,8 @@ out:
 }
 EXPORT_SYMBOL(il_mac_change_interface);
 
-void il_mac_flush(struct ieee80211_hw *hw, u32 queues, bool drop)
+void il_mac_flush(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
+                 u32 queues, bool drop)
 {
        struct il_priv *il = hw->priv;
        unsigned long timeout = jiffies + msecs_to_jiffies(500);
index dfb13c70efe83ea8415f93ef2dad9c97a0cb6891..ea5c0f863c4ee35b2cf6738569a5cb3253553c12 100644 (file)
@@ -1723,7 +1723,8 @@ void il_mac_remove_interface(struct ieee80211_hw *hw,
                             struct ieee80211_vif *vif);
 int il_mac_change_interface(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
                            enum nl80211_iftype newtype, bool newp2p);
-void il_mac_flush(struct ieee80211_hw *hw, u32 queues, bool drop);
+void il_mac_flush(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
+                 u32 queues, bool drop);
 int il_alloc_txq_mem(struct il_priv *il);
 void il_free_txq_mem(struct il_priv *il);
 
index 576f7ee38ca5894150cba0e54eff4bcc5ff2f079..d169228f59e7006528df76a96d300010f8e44a4b 100644 (file)
@@ -180,7 +180,7 @@ void iwlagn_dev_txfifo_flush(struct iwl_priv *priv)
                goto done;
        }
        IWL_DEBUG_INFO(priv, "wait transmit/flush all frames\n");
-       iwl_trans_wait_tx_queue_empty(priv->trans);
+       iwl_trans_wait_tx_queue_empty(priv->trans, 0xffffffff);
 done:
        ieee80211_wake_queues(priv->hw);
        mutex_unlock(&priv->mutex);
index dd55c9cf7ba80376ef3ae507b434e79d6dd1cca4..29af7b51e3708788d02f4a1651205a348a5102dd 100644 (file)
@@ -1091,7 +1091,8 @@ static void iwlagn_configure_filter(struct ieee80211_hw *hw,
                        FIF_BCN_PRBRESP_PROMISC | FIF_CONTROL;
 }
 
-static void iwlagn_mac_flush(struct ieee80211_hw *hw, u32 queues, bool drop)
+static void iwlagn_mac_flush(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
+                            u32 queues, bool drop)
 {
        struct iwl_priv *priv = IWL_MAC80211_GET_DVM(hw);
 
@@ -1119,7 +1120,7 @@ static void iwlagn_mac_flush(struct ieee80211_hw *hw, u32 queues, bool drop)
                }
        }
        IWL_DEBUG_MAC80211(priv, "wait transmit/flush all frames\n");
-       iwl_trans_wait_tx_queue_empty(priv->trans);
+       iwl_trans_wait_tx_queue_empty(priv->trans, 0xffffffff);
 done:
        mutex_unlock(&priv->mutex);
        IWL_DEBUG_MAC80211(priv, "leave\n");
index 6a6df71af1d7ba6e4b4dfec16a2042c6cc357de4..6a00353768f328b1931d54a51eef8c7eb8bbc248 100644 (file)
@@ -2053,6 +2053,17 @@ static bool iwl_set_hw_rfkill_state(struct iwl_op_mode *op_mode, bool state)
        return false;
 }
 
+static void iwl_napi_add(struct iwl_op_mode *op_mode,
+                        struct napi_struct *napi,
+                        struct net_device *napi_dev,
+                        int (*poll)(struct napi_struct *, int),
+                        int weight)
+{
+       struct iwl_priv *priv = IWL_OP_MODE_GET_DVM(op_mode);
+
+       ieee80211_napi_add(priv->hw, napi, napi_dev, poll, weight);
+}
+
 static const struct iwl_op_mode_ops iwl_dvm_ops = {
        .start = iwl_op_mode_dvm_start,
        .stop = iwl_op_mode_dvm_stop,
@@ -2065,6 +2076,7 @@ static const struct iwl_op_mode_ops iwl_dvm_ops = {
        .cmd_queue_full = iwl_cmd_queue_full,
        .nic_config = iwl_nic_config,
        .wimax_active = iwl_wimax_active,
+       .napi_add = iwl_napi_add,
 };
 
 /*****************************************************************************
index 854ba84ccb730995f0d786d26a85a1c2fc14c0dc..c3817fae16c04207136e5d45e8cc65bd3a125429 100644 (file)
@@ -62,6 +62,7 @@ static const struct iwl_base_params iwl1000_base_params = {
        .led_compensation = 51,
        .wd_timeout = IWL_WATCHDOG_DISABLED,
        .max_event_log_size = 128,
+       .scd_chain_ext_wa = true,
 };
 
 static const struct iwl_ht_params iwl1000_ht_params = {
index 3e63323637f3f593dd895528dac4e5fb794fdb4c..21e5d0843a62a84a0f21ff337d1b674750fa3999 100644 (file)
@@ -75,6 +75,7 @@ static const struct iwl_base_params iwl2000_base_params = {
        .wd_timeout = IWL_DEF_WD_TIMEOUT,
        .max_event_log_size = 512,
        .shadow_reg_enable = false, /* TODO: fix bugs using this feature */
+       .scd_chain_ext_wa = true,
 };
 
 
@@ -88,6 +89,7 @@ static const struct iwl_base_params iwl2030_base_params = {
        .wd_timeout = IWL_LONG_WD_TIMEOUT,
        .max_event_log_size = 512,
        .shadow_reg_enable = false, /* TODO: fix bugs using this feature */
+       .scd_chain_ext_wa = true,
 };
 
 static const struct iwl_ht_params iwl2000_ht_params = {
index 6674f2c4541c183fbae6c2a1bb0861d9eb3a966f..332bbede39e5b0fc6bb25b7ab30bbbd22c929b8b 100644 (file)
@@ -61,6 +61,7 @@ static const struct iwl_base_params iwl5000_base_params = {
        .led_compensation = 51,
        .wd_timeout = IWL_WATCHDOG_DISABLED,
        .max_event_log_size = 512,
+       .scd_chain_ext_wa = true,
 };
 
 static const struct iwl_ht_params iwl5000_ht_params = {
index 8048de90233fa038545e9d752eaaffbfc3968c72..8f2c3c8c6b843f78f346225d371ee3ad3df54f23 100644 (file)
@@ -85,6 +85,7 @@ static const struct iwl_base_params iwl6000_base_params = {
        .wd_timeout = IWL_DEF_WD_TIMEOUT,
        .max_event_log_size = 512,
        .shadow_reg_enable = false, /* TODO: fix bugs using this feature */
+       .scd_chain_ext_wa = true,
 };
 
 static const struct iwl_base_params iwl6050_base_params = {
@@ -97,6 +98,7 @@ static const struct iwl_base_params iwl6050_base_params = {
        .wd_timeout = IWL_DEF_WD_TIMEOUT,
        .max_event_log_size = 1024,
        .shadow_reg_enable = false, /* TODO: fix bugs using this feature */
+       .scd_chain_ext_wa = true,
 };
 
 static const struct iwl_base_params iwl6000_g2_base_params = {
@@ -109,6 +111,7 @@ static const struct iwl_base_params iwl6000_g2_base_params = {
        .wd_timeout = IWL_LONG_WD_TIMEOUT,
        .max_event_log_size = 512,
        .shadow_reg_enable = false, /* TODO: fix bugs using this feature */
+       .scd_chain_ext_wa = true,
 };
 
 static const struct iwl_ht_params iwl6000_ht_params = {
index 4c2d4ef28b220c719ac49f9f9726b931c2d35442..f73de239cdc112d09f0f65097063a59bd9378bca 100644 (file)
 #define IWL3160_UCODE_API_MAX  9
 
 /* Oldest version we won't warn about */
-#define IWL7260_UCODE_API_OK   8
-#define IWL3160_UCODE_API_OK   8
+#define IWL7260_UCODE_API_OK   9
+#define IWL3160_UCODE_API_OK   9
 
 /* Lowest firmware API version supported */
-#define IWL7260_UCODE_API_MIN  7
-#define IWL3160_UCODE_API_MIN  7
+#define IWL7260_UCODE_API_MIN  8
+#define IWL3160_UCODE_API_MIN  8
 
 /* NVM versions */
 #define IWL7260_NVM_VERSION            0x0a1d
@@ -107,6 +107,7 @@ static const struct iwl_base_params iwl7000_base_params = {
        .max_event_log_size = 512,
        .shadow_reg_enable = true,
        .pcie_l1_allowed = true,
+       .apmg_wake_up_wa = true,
 };
 
 static const struct iwl_ht_params iwl7000_ht_params = {
index 3f17dc3f2c8a9fdde83bddb254efb8cc8d33502f..7ce82d9c72226d7ee2e76a3d9a3f0426df1cbf25 100644 (file)
@@ -146,6 +146,9 @@ static inline u8 num_of_ant(u8 mask)
  * @wd_timeout: TX queues watchdog timeout
  * @max_event_log_size: size of event log buffer size for ucode event logging
  * @shadow_reg_enable: HW shadow register support
+ * @apmg_wake_up_wa: should the MAC access REQ be asserted when a command
+ *     is in flight. This is due to a HW bug in 7260, 3160 and 7265.
+ * @scd_chain_ext_wa: should the chain extension feature in SCD be disabled.
  */
 struct iwl_base_params {
        int eeprom_size;
@@ -160,6 +163,8 @@ struct iwl_base_params {
        u32 max_event_log_size;
        const bool shadow_reg_enable;
        const bool pcie_l1_allowed;
+       const bool apmg_wake_up_wa;
+       const bool scd_chain_ext_wa;
 };
 
 /*
index d14f19339d6140607c99d1b6660b039f9ac4aa66..f5927d0cf9b66dca59c13ede6e3a6627a368fecf 100644 (file)
  * @IWL_UCODE_TLV_FLAGS_UAPSD: This uCode image supports uAPSD
  * @IWL_UCODE_TLV_FLAGS_SHORT_BL: 16 entries of black list instead of 64 in scan
  *     offload profile config command.
- * @IWL_UCODE_TLV_FLAGS_RX_ENERGY_API: supports rx signal strength api
- * @IWL_UCODE_TLV_FLAGS_TIME_EVENT_API_V2: using the new time event API.
  * @IWL_UCODE_TLV_FLAGS_D3_6_IPV6_ADDRS: D3 image supports up to six
  *     (rather than two) IPv6 addresses
- * @IWL_UCODE_TLV_FLAGS_BF_UPDATED: new beacon filtering API
  * @IWL_UCODE_TLV_FLAGS_NO_BASIC_SSID: not sending a probe with the SSID element
  *     from the probe request template.
- * @IWL_UCODE_TLV_FLAGS_D3_CONTINUITY_API: modified D3 API to allow keeping
- *     connection when going back to D0
  * @IWL_UCODE_TLV_FLAGS_NEW_NSOFFL_SMALL: new NS offload (small version)
  * @IWL_UCODE_TLV_FLAGS_NEW_NSOFFL_LARGE: new NS offload (large version)
- * @IWL_UCODE_TLV_FLAGS_SCHED_SCAN: this uCode image supports scheduled scan.
- * @IWL_UCODE_TLV_FLAGS_STA_KEY_CMD: new ADD_STA and ADD_STA_KEY command API
- * @IWL_UCODE_TLV_FLAGS_DEVICE_PS_CMD: support device wide power command
- *     containing CAM (Continuous Active Mode) indication.
+ * @IWL_UCODE_TLV_FLAGS_P2P_PM: P2P client supports PM as a stand alone MAC
  * @IWL_UCODE_TLV_FLAGS_P2P_BSS_PS_DCM: support power save on BSS station and
  *     P2P client interfaces simultaneously if they are in different bindings.
+ * @IWL_UCODE_TLV_FLAGS_P2P_BSS_PS_SCM: support power save on BSS station and
+ *     P2P client interfaces simultaneously if they are in same bindings.
  * @IWL_UCODE_TLV_FLAGS_P2P_PS_UAPSD: P2P client supports uAPSD power save
  * @IWL_UCODE_TLV_FLAGS_BCAST_FILTERING: uCode supports broadcast filtering.
  * @IWL_UCODE_TLV_FLAGS_GO_UAPSD: AP/GO interfaces support uAPSD clients
+ * @IWL_UCODE_TLV_FLAGS_EBS_SUPPORT: this uCode image supports EBS.
  */
 enum iwl_ucode_tlv_flag {
        IWL_UCODE_TLV_FLAGS_PAN                 = BIT(0),
@@ -104,22 +99,15 @@ enum iwl_ucode_tlv_flag {
        IWL_UCODE_TLV_FLAGS_MFP                 = BIT(2),
        IWL_UCODE_TLV_FLAGS_P2P                 = BIT(3),
        IWL_UCODE_TLV_FLAGS_DW_BC_TABLE         = BIT(4),
-       IWL_UCODE_TLV_FLAGS_NEWBT_COEX          = BIT(5),
-       IWL_UCODE_TLV_FLAGS_PM_CMD_SUPPORT      = BIT(6),
        IWL_UCODE_TLV_FLAGS_SHORT_BL            = BIT(7),
-       IWL_UCODE_TLV_FLAGS_RX_ENERGY_API       = BIT(8),
-       IWL_UCODE_TLV_FLAGS_TIME_EVENT_API_V2   = BIT(9),
        IWL_UCODE_TLV_FLAGS_D3_6_IPV6_ADDRS     = BIT(10),
-       IWL_UCODE_TLV_FLAGS_BF_UPDATED          = BIT(11),
        IWL_UCODE_TLV_FLAGS_NO_BASIC_SSID       = BIT(12),
-       IWL_UCODE_TLV_FLAGS_D3_CONTINUITY_API   = BIT(14),
        IWL_UCODE_TLV_FLAGS_NEW_NSOFFL_SMALL    = BIT(15),
        IWL_UCODE_TLV_FLAGS_NEW_NSOFFL_LARGE    = BIT(16),
-       IWL_UCODE_TLV_FLAGS_SCHED_SCAN          = BIT(17),
-       IWL_UCODE_TLV_FLAGS_STA_KEY_CMD         = BIT(19),
-       IWL_UCODE_TLV_FLAGS_DEVICE_PS_CMD       = BIT(20),
+       IWL_UCODE_TLV_FLAGS_P2P_PM              = BIT(21),
        IWL_UCODE_TLV_FLAGS_BSS_P2P_PS_DCM      = BIT(22),
-       IWL_UCODE_TLV_FLAGS_UAPSD_SUPPORT       = BIT(24),
+       IWL_UCODE_TLV_FLAGS_BSS_P2P_PS_SCM      = BIT(23),
+       IWL_UCODE_TLV_FLAGS_EBS_SUPPORT         = BIT(25),
        IWL_UCODE_TLV_FLAGS_P2P_PS_UAPSD        = BIT(26),
        IWL_UCODE_TLV_FLAGS_BCAST_FILTERING     = BIT(29),
        IWL_UCODE_TLV_FLAGS_GO_UAPSD            = BIT(30),
@@ -183,6 +171,7 @@ enum iwl_ucode_sec {
 #define IWL_UCODE_SECTION_MAX 12
 #define IWL_API_ARRAY_SIZE     1
 #define IWL_CAPABILITIES_ARRAY_SIZE    1
+#define CPU1_CPU2_SEPARATOR_SECTION    0xFFFFCCCC
 
 struct iwl_ucode_capabilities {
        u32 max_probe_length;
index 6be30c69850619f81c2468febb3634fd5cf390fd..4049c0d626ba5dfb2f6be5754b27074175730dbc 100644 (file)
@@ -134,12 +134,13 @@ static const u8 iwl_nvm_channels_family_8000[] = {
        149, 153, 157, 161, 165, 169, 173, 177, 181
 };
 
-#define IWL_NUM_CHANNELS       ARRAY_SIZE(iwl_nvm_channels)
+#define IWL_NUM_CHANNELS               ARRAY_SIZE(iwl_nvm_channels)
 #define IWL_NUM_CHANNELS_FAMILY_8000   ARRAY_SIZE(iwl_nvm_channels_family_8000)
-#define NUM_2GHZ_CHANNELS      14
-#define FIRST_2GHZ_HT_MINUS    5
-#define LAST_2GHZ_HT_PLUS      9
-#define LAST_5GHZ_HT           161
+#define NUM_2GHZ_CHANNELS              14
+#define NUM_2GHZ_CHANNELS_FAMILY_8000  13
+#define FIRST_2GHZ_HT_MINUS            5
+#define LAST_2GHZ_HT_PLUS              9
+#define LAST_5GHZ_HT                   161
 
 #define DEFAULT_MAX_TX_POWER 16
 
@@ -202,21 +203,23 @@ static int iwl_init_channel_map(struct device *dev, const struct iwl_cfg *cfg,
        struct ieee80211_channel *channel;
        u16 ch_flags;
        bool is_5ghz;
-       int num_of_ch;
+       int num_of_ch, num_2ghz_channels;
        const u8 *nvm_chan;
 
        if (cfg->device_family != IWL_DEVICE_FAMILY_8000) {
                num_of_ch = IWL_NUM_CHANNELS;
                nvm_chan = &iwl_nvm_channels[0];
+               num_2ghz_channels = NUM_2GHZ_CHANNELS;
        } else {
                num_of_ch = IWL_NUM_CHANNELS_FAMILY_8000;
                nvm_chan = &iwl_nvm_channels_family_8000[0];
+               num_2ghz_channels = NUM_2GHZ_CHANNELS_FAMILY_8000;
        }
 
        for (ch_idx = 0; ch_idx < num_of_ch; ch_idx++) {
                ch_flags = __le16_to_cpup(nvm_ch_flags + ch_idx);
 
-               if (ch_idx >= NUM_2GHZ_CHANNELS &&
+               if (ch_idx >= num_2ghz_channels &&
                    !data->sku_cap_band_52GHz_enable)
                        ch_flags &= ~NVM_CHANNEL_VALID;
 
@@ -225,7 +228,7 @@ static int iwl_init_channel_map(struct device *dev, const struct iwl_cfg *cfg,
                                         "Ch. %d Flags %x [%sGHz] - No traffic\n",
                                         nvm_chan[ch_idx],
                                         ch_flags,
-                                        (ch_idx >= NUM_2GHZ_CHANNELS) ?
+                                        (ch_idx >= num_2ghz_channels) ?
                                         "5.2" : "2.4");
                        continue;
                }
@@ -234,7 +237,7 @@ static int iwl_init_channel_map(struct device *dev, const struct iwl_cfg *cfg,
                n_channels++;
 
                channel->hw_value = nvm_chan[ch_idx];
-               channel->band = (ch_idx < NUM_2GHZ_CHANNELS) ?
+               channel->band = (ch_idx < num_2ghz_channels) ?
                                IEEE80211_BAND_2GHZ : IEEE80211_BAND_5GHZ;
                channel->center_freq =
                        ieee80211_channel_to_frequency(
@@ -242,7 +245,7 @@ static int iwl_init_channel_map(struct device *dev, const struct iwl_cfg *cfg,
 
                /* TODO: Need to be dependent to the NVM */
                channel->flags = IEEE80211_CHAN_NO_HT40;
-               if (ch_idx < NUM_2GHZ_CHANNELS &&
+               if (ch_idx < num_2ghz_channels &&
                    (ch_flags & NVM_CHANNEL_40MHZ)) {
                        if (nvm_chan[ch_idx] <= LAST_2GHZ_HT_PLUS)
                                channel->flags &= ~IEEE80211_CHAN_NO_HT40PLUS;
@@ -250,7 +253,7 @@ static int iwl_init_channel_map(struct device *dev, const struct iwl_cfg *cfg,
                                channel->flags &= ~IEEE80211_CHAN_NO_HT40MINUS;
                } else if (nvm_chan[ch_idx] <= LAST_5GHZ_HT &&
                           (ch_flags & NVM_CHANNEL_40MHZ)) {
-                       if ((ch_idx - NUM_2GHZ_CHANNELS) % 2 == 0)
+                       if ((ch_idx - num_2ghz_channels) % 2 == 0)
                                channel->flags &= ~IEEE80211_CHAN_NO_HT40PLUS;
                        else
                                channel->flags &= ~IEEE80211_CHAN_NO_HT40MINUS;
index ea29504ac61704c39c24a117dec0a5d92aa58376..99785c892f963c7435b0048c4a097f6cae9e7808 100644 (file)
@@ -63,6 +63,7 @@
 #ifndef __iwl_op_mode_h__
 #define __iwl_op_mode_h__
 
+#include <linux/netdevice.h>
 #include <linux/debugfs.h>
 
 struct iwl_op_mode;
@@ -112,8 +113,11 @@ struct iwl_cfg;
  * @stop: stop the op_mode. Must free all the memory allocated.
  *     May sleep
  * @rx: Rx notification to the op_mode. rxb is the Rx buffer itself. Cmd is the
- *     HCMD this Rx responds to.
- *     This callback may sleep, it is called from a threaded IRQ handler.
+ *     HCMD this Rx responds to. Can't sleep.
+ * @napi_add: NAPI initialisation. The transport is fully responsible for NAPI,
+ *     but the higher layers need to know about it (in particular mac80211 to
+ *     to able to call the right NAPI RX functions); this function is needed
+ *     to eventually call netif_napi_add() with higher layer involvement.
  * @queue_full: notifies that a HW queue is full.
  *     Must be atomic and called with BH disabled.
  * @queue_not_full: notifies that a HW queue is not full any more.
@@ -143,6 +147,11 @@ struct iwl_op_mode_ops {
        void (*stop)(struct iwl_op_mode *op_mode);
        int (*rx)(struct iwl_op_mode *op_mode, struct iwl_rx_cmd_buffer *rxb,
                  struct iwl_device_cmd *cmd);
+       void (*napi_add)(struct iwl_op_mode *op_mode,
+                        struct napi_struct *napi,
+                        struct net_device *napi_dev,
+                        int (*poll)(struct napi_struct *, int),
+                        int weight);
        void (*queue_full)(struct iwl_op_mode *op_mode, int queue);
        void (*queue_not_full)(struct iwl_op_mode *op_mode, int queue);
        bool (*hw_rf_kill)(struct iwl_op_mode *op_mode, bool state);
@@ -180,7 +189,6 @@ static inline int iwl_op_mode_rx(struct iwl_op_mode *op_mode,
                                  struct iwl_rx_cmd_buffer *rxb,
                                  struct iwl_device_cmd *cmd)
 {
-       might_sleep();
        return op_mode->ops->rx(op_mode, rxb, cmd);
 }
 
@@ -249,4 +257,15 @@ static inline int iwl_op_mode_exit_d0i3(struct iwl_op_mode *op_mode)
        return op_mode->ops->exit_d0i3(op_mode);
 }
 
+static inline void iwl_op_mode_napi_add(struct iwl_op_mode *op_mode,
+                                       struct napi_struct *napi,
+                                       struct net_device *napi_dev,
+                                       int (*poll)(struct napi_struct *, int),
+                                       int weight)
+{
+       if (!op_mode->ops->napi_add)
+               return;
+       op_mode->ops->napi_add(op_mode, napi, napi_dev, poll, weight);
+}
+
 #endif /* __iwl_op_mode_h__ */
index 5f657c501406cc995f7f8c065f26d9983ba43ffe..779311080a9ef8c0a18dd2a305121fa4d058a128 100644 (file)
@@ -348,4 +348,12 @@ enum secure_load_status_reg {
 
 #define LMPM_SECURE_TIME_OUT   (100)
 
+/* Rx FIFO */
+#define RXF_SIZE_ADDR                  (0xa00c88)
+#define RXF_SIZE_BYTE_CND_POS          (7)
+#define RXF_SIZE_BYTE_CNT_MSK          (0x3ff << RXF_SIZE_BYTE_CND_POS)
+
+#define RXF_LD_FENCE_OFFSET_ADDR       (0xa00c10)
+#define RXF_FIFO_RD_FENCE_ADDR         (0xa00c0c)
+
 #endif                         /* __iwl_prph_h__ */
index 8cdb0dd618a6fdfcc8d57095e41974e6e22984ab..22fd94ec804882651625ab9506840a66029006ff 100644 (file)
@@ -437,8 +437,7 @@ struct iwl_trans;
  *     this one. The op_mode must not configure the HCMD queue. May sleep.
  * @txq_disable: de-configure a Tx queue to send AMPDUs
  *     Must be atomic
- * @wait_tx_queue_empty: wait until all tx queues are empty
- *     May sleep
+ * @wait_tx_queue_empty: wait until tx queues are empty. May sleep.
  * @dbgfs_register: add the dbgfs files under this directory. Files will be
  *     automatically deleted.
  * @write8: write a u8 to a register at offset ofs from the BAR
@@ -490,7 +489,7 @@ struct iwl_trans_ops {
        void (*txq_disable)(struct iwl_trans *trans, int queue);
 
        int (*dbgfs_register)(struct iwl_trans *trans, struct dentry* dir);
-       int (*wait_tx_queue_empty)(struct iwl_trans *trans);
+       int (*wait_tx_queue_empty)(struct iwl_trans *trans, u32 txq_bm);
 
        void (*write8)(struct iwl_trans *trans, u32 ofs, u8 val);
        void (*write32)(struct iwl_trans *trans, u32 ofs, u32 val);
@@ -759,12 +758,13 @@ static inline void iwl_trans_ac_txq_enable(struct iwl_trans *trans, int queue,
                             IWL_MAX_TID_COUNT, IWL_FRAME_LIMIT, 0);
 }
 
-static inline int iwl_trans_wait_tx_queue_empty(struct iwl_trans *trans)
+static inline int iwl_trans_wait_tx_queue_empty(struct iwl_trans *trans,
+                                               u32 txq_bm)
 {
        if (unlikely(trans->state != IWL_TRANS_FW_ALIVE))
                IWL_ERR(trans, "%s bad state = %d", __func__, trans->state);
 
-       return trans->ops->wait_tx_queue_empty(trans);
+       return trans->ops->wait_tx_queue_empty(trans, txq_bm);
 }
 
 static inline int iwl_trans_dbgfs_register(struct iwl_trans *trans,
index fa858d548d13c0bd794b98dc4da2053893b460dc..8f4b03dbaf3f4ecae21927a0c976d2b1c3852157 100644 (file)
@@ -104,11 +104,8 @@ static const u8 iwl_bt_prio_tbl[BT_COEX_PRIO_TBL_EVT_MAX] = {
 #define BT_DISABLE_REDUCED_TXPOWER_THRESHOLD   (-65)
 #define BT_ANTENNA_COUPLING_THRESHOLD          (30)
 
-int iwl_send_bt_prio_tbl(struct iwl_mvm *mvm)
+static int iwl_send_bt_prio_tbl(struct iwl_mvm *mvm)
 {
-       if (!(mvm->fw->ucode_capa.flags & IWL_UCODE_TLV_FLAGS_NEWBT_COEX))
-               return 0;
-
        return iwl_mvm_send_cmd_pdu(mvm, BT_COEX_PRIO_TABLE, CMD_SYNC,
                                    sizeof(struct iwl_bt_coex_prio_tbl_cmd),
                                    &iwl_bt_prio_tbl);
@@ -573,8 +570,9 @@ int iwl_send_bt_init_conf(struct iwl_mvm *mvm)
        int ret;
        u32 flags;
 
-       if (!(mvm->fw->ucode_capa.flags & IWL_UCODE_TLV_FLAGS_NEWBT_COEX))
-               return 0;
+       ret = iwl_send_bt_prio_tbl(mvm);
+       if (ret)
+               return ret;
 
        bt_cmd = kzalloc(sizeof(*bt_cmd), GFP_KERNEL);
        if (!bt_cmd)
@@ -582,10 +580,12 @@ int iwl_send_bt_init_conf(struct iwl_mvm *mvm)
        cmd.data[0] = bt_cmd;
 
        bt_cmd->max_kill = 5;
-       bt_cmd->bt4_antenna_isolation_thr = BT_ANTENNA_COUPLING_THRESHOLD,
-       bt_cmd->bt4_antenna_isolation = iwlwifi_mod_params.ant_coupling,
-       bt_cmd->bt4_tx_tx_delta_freq_thr = 15,
-       bt_cmd->bt4_tx_rx_max_freq0 = 15,
+       bt_cmd->bt4_antenna_isolation_thr = BT_ANTENNA_COUPLING_THRESHOLD;
+       bt_cmd->bt4_antenna_isolation = iwlwifi_mod_params.ant_coupling;
+       bt_cmd->bt4_tx_tx_delta_freq_thr = 15;
+       bt_cmd->bt4_tx_rx_max_freq0 = 15;
+       bt_cmd->override_primary_lut = BT_COEX_INVALID_LUT;
+       bt_cmd->override_secondary_lut = BT_COEX_INVALID_LUT;
 
        flags = iwlwifi_mod_params.bt_coex_active ?
                        BT_COEX_NW : BT_COEX_DISABLE;
@@ -1215,6 +1215,17 @@ bool iwl_mvm_bt_coex_is_mimo_allowed(struct iwl_mvm *mvm,
        return iwl_get_coex_type(mvm, mvmsta->vif) == BT_COEX_TIGHT_LUT;
 }
 
+bool iwl_mvm_bt_coex_is_tpc_allowed(struct iwl_mvm *mvm,
+                                   enum ieee80211_band band)
+{
+       u32 bt_activity = le32_to_cpu(mvm->last_bt_notif.bt_activity_grading);
+
+       if (band != IEEE80211_BAND_2GHZ)
+               return false;
+
+       return bt_activity >= BT_LOW_TRAFFIC;
+}
+
 u8 iwl_mvm_bt_coex_tx_prio(struct iwl_mvm *mvm, struct ieee80211_hdr *hdr,
                           struct ieee80211_tx_info *info, u8 ac)
 {
@@ -1249,9 +1260,6 @@ u8 iwl_mvm_bt_coex_tx_prio(struct iwl_mvm *mvm, struct ieee80211_hdr *hdr,
 
 void iwl_mvm_bt_coex_vif_change(struct iwl_mvm *mvm)
 {
-       if (!(mvm->fw->ucode_capa.flags & IWL_UCODE_TLV_FLAGS_NEWBT_COEX))
-               return;
-
        iwl_mvm_bt_coex_notif_handle(mvm);
 }
 
index e56f5a0edf855331a1411e76406a143176b5e9d5..7694472a303e0062b511ddb97404a11a1717c75d 100644 (file)
@@ -744,10 +744,8 @@ static int iwl_mvm_get_last_nonqos_seq(struct iwl_mvm *mvm,
        int err;
        u32 size;
 
-       if (mvm->fw->ucode_capa.flags & IWL_UCODE_TLV_FLAGS_D3_CONTINUITY_API) {
-               cmd.data[0] = &query_cmd;
-               cmd.len[0] = sizeof(query_cmd);
-       }
+       cmd.data[0] = &query_cmd;
+       cmd.len[0] = sizeof(query_cmd);
 
        err = iwl_mvm_send_cmd(mvm, &cmd);
        if (err)
@@ -758,10 +756,8 @@ static int iwl_mvm_get_last_nonqos_seq(struct iwl_mvm *mvm,
                err = -EINVAL;
        } else {
                err = le16_to_cpup((__le16 *)cmd.resp_pkt->data);
-               /* new API returns next, not last-used seqno */
-               if (mvm->fw->ucode_capa.flags &
-                               IWL_UCODE_TLV_FLAGS_D3_CONTINUITY_API)
-                       err = (u16) (err - 0x10);
+               /* firmware returns next, not last-used seqno */
+               err = (u16) (err - 0x10);
        }
 
        iwl_free_resp(&cmd);
@@ -785,10 +781,6 @@ void iwl_mvm_set_last_nonqos_seq(struct iwl_mvm *mvm, struct ieee80211_vif *vif)
 
        mvmvif->seqno_valid = false;
 
-       if (!(mvm->fw->ucode_capa.flags &
-                       IWL_UCODE_TLV_FLAGS_D3_CONTINUITY_API))
-               return;
-
        if (iwl_mvm_send_cmd_pdu(mvm, NON_QOS_TX_COUNTER_CMD, CMD_SYNC,
                                 sizeof(query_cmd), &query_cmd))
                IWL_ERR(mvm, "failed to set non-QoS seqno\n");
@@ -1082,6 +1074,15 @@ static int __iwl_mvm_suspend(struct ieee80211_hw *hw,
 
 int iwl_mvm_suspend(struct ieee80211_hw *hw, struct cfg80211_wowlan *wowlan)
 {
+       struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw);
+
+       if (iwl_mvm_is_d0i3_supported(mvm)) {
+               mutex_lock(&mvm->d0i3_suspend_mutex);
+               __set_bit(D0I3_DEFER_WAKEUP, &mvm->d0i3_suspend_flags);
+               mutex_unlock(&mvm->d0i3_suspend_mutex);
+               return 0;
+       }
+
        return __iwl_mvm_suspend(hw, wowlan, false);
 }
 
@@ -1277,7 +1278,7 @@ static void iwl_mvm_set_tkip_rx_seq(struct tkip_sc *scs,
 }
 
 static void iwl_mvm_set_key_rx_seq(struct ieee80211_key_conf *key,
-                                  struct iwl_wowlan_status_v6 *status)
+                                  struct iwl_wowlan_status *status)
 {
        union iwl_all_tsc_rsc *rsc = &status->gtk.rsc.all_tsc_rsc;
 
@@ -1294,7 +1295,7 @@ static void iwl_mvm_set_key_rx_seq(struct ieee80211_key_conf *key,
 }
 
 struct iwl_mvm_d3_gtk_iter_data {
-       struct iwl_wowlan_status_v6 *status;
+       struct iwl_wowlan_status *status;
        void *last_gtk;
        u32 cipher;
        bool find_phase, unhandled_cipher;
@@ -1370,7 +1371,7 @@ static void iwl_mvm_d3_update_gtks(struct ieee80211_hw *hw,
 
 static bool iwl_mvm_setup_connection_keep(struct iwl_mvm *mvm,
                                          struct ieee80211_vif *vif,
-                                         struct iwl_wowlan_status_v6 *status)
+                                         struct iwl_wowlan_status *status)
 {
        struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
        struct iwl_mvm_d3_gtk_iter_data gtkdata = {
@@ -1468,7 +1469,7 @@ static bool iwl_mvm_query_wakeup_reasons(struct iwl_mvm *mvm,
                .flags = CMD_SYNC | CMD_WANT_SKB,
        };
        struct iwl_wowlan_status_data status;
-       struct iwl_wowlan_status_v6 *status_v6;
+       struct iwl_wowlan_status *fw_status;
        int ret, len, status_size, i;
        bool keep;
        struct ieee80211_sta *ap_sta;
@@ -1505,10 +1506,7 @@ static bool iwl_mvm_query_wakeup_reasons(struct iwl_mvm *mvm,
        if (!cmd.resp_pkt)
                goto out_unlock;
 
-       if (mvm->fw->ucode_capa.flags & IWL_UCODE_TLV_FLAGS_D3_CONTINUITY_API)
-               status_size = sizeof(struct iwl_wowlan_status_v6);
-       else
-               status_size = sizeof(struct iwl_wowlan_status_v4);
+       status_size = sizeof(*fw_status);
 
        len = iwl_rx_packet_payload_len(cmd.resp_pkt);
        if (len < status_size) {
@@ -1516,35 +1514,18 @@ static bool iwl_mvm_query_wakeup_reasons(struct iwl_mvm *mvm,
                goto out_free_resp;
        }
 
-       if (mvm->fw->ucode_capa.flags & IWL_UCODE_TLV_FLAGS_D3_CONTINUITY_API) {
-               status_v6 = (void *)cmd.resp_pkt->data;
-
-               status.pattern_number = le16_to_cpu(status_v6->pattern_number);
-               for (i = 0; i < 8; i++)
-                       status.qos_seq_ctr[i] =
-                               le16_to_cpu(status_v6->qos_seq_ctr[i]);
-               status.wakeup_reasons = le32_to_cpu(status_v6->wakeup_reasons);
-               status.wake_packet_length =
-                       le32_to_cpu(status_v6->wake_packet_length);
-               status.wake_packet_bufsize =
-                       le32_to_cpu(status_v6->wake_packet_bufsize);
-               status.wake_packet = status_v6->wake_packet;
-       } else {
-               struct iwl_wowlan_status_v4 *status_v4;
-               status_v6 = NULL;
-               status_v4 = (void *)cmd.resp_pkt->data;
-
-               status.pattern_number = le16_to_cpu(status_v4->pattern_number);
-               for (i = 0; i < 8; i++)
-                       status.qos_seq_ctr[i] =
-                               le16_to_cpu(status_v4->qos_seq_ctr[i]);
-               status.wakeup_reasons = le32_to_cpu(status_v4->wakeup_reasons);
-               status.wake_packet_length =
-                       le32_to_cpu(status_v4->wake_packet_length);
-               status.wake_packet_bufsize =
-                       le32_to_cpu(status_v4->wake_packet_bufsize);
-               status.wake_packet = status_v4->wake_packet;
-       }
+       fw_status = (void *)cmd.resp_pkt->data;
+
+       status.pattern_number = le16_to_cpu(fw_status->pattern_number);
+       for (i = 0; i < 8; i++)
+               status.qos_seq_ctr[i] =
+                       le16_to_cpu(fw_status->qos_seq_ctr[i]);
+       status.wakeup_reasons = le32_to_cpu(fw_status->wakeup_reasons);
+       status.wake_packet_length =
+               le32_to_cpu(fw_status->wake_packet_length);
+       status.wake_packet_bufsize =
+               le32_to_cpu(fw_status->wake_packet_bufsize);
+       status.wake_packet = fw_status->wake_packet;
 
        if (len != status_size + ALIGN(status.wake_packet_bufsize, 4)) {
                IWL_ERR(mvm, "Invalid WoWLAN status response!\n");
@@ -1571,7 +1552,7 @@ static bool iwl_mvm_query_wakeup_reasons(struct iwl_mvm *mvm,
 
        iwl_mvm_report_wakeup_reasons(mvm, vif, &status);
 
-       keep = iwl_mvm_setup_connection_keep(mvm, vif, status_v6);
+       keep = iwl_mvm_setup_connection_keep(mvm, vif, fw_status);
 
        iwl_free_resp(&cmd);
        return keep;
@@ -1674,6 +1655,19 @@ int iwl_mvm_resume(struct ieee80211_hw *hw)
 {
        struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw);
 
+       if (iwl_mvm_is_d0i3_supported(mvm)) {
+               bool exit_now;
+
+               mutex_lock(&mvm->d0i3_suspend_mutex);
+               __clear_bit(D0I3_DEFER_WAKEUP, &mvm->d0i3_suspend_flags);
+               exit_now = __test_and_clear_bit(D0I3_PENDING_WAKEUP,
+                                               &mvm->d0i3_suspend_flags);
+               mutex_unlock(&mvm->d0i3_suspend_mutex);
+               if (exit_now)
+                       _iwl_mvm_exit_d0i3(mvm);
+               return 0;
+       }
+
        return __iwl_mvm_resume(mvm, false);
 }
 
index 9b59e1d7ae71ea888973992cb88363ba2371fdad..6047cfdafb959e99b6bb6e56d74d3164f1add1ae 100644 (file)
@@ -103,10 +103,6 @@ static void iwl_dbgfs_update_pm(struct iwl_mvm *mvm,
                IWL_DEBUG_POWER(mvm, "tx_data_timeout=%d\n", val);
                dbgfs_pm->tx_data_timeout = val;
                break;
-       case MVM_DEBUGFS_PM_DISABLE_POWER_OFF:
-               IWL_DEBUG_POWER(mvm, "disable_power_off=%d\n", val);
-               dbgfs_pm->disable_power_off = val;
-               break;
        case MVM_DEBUGFS_PM_LPRX_ENA:
                IWL_DEBUG_POWER(mvm, "lprx %s\n", val ? "enabled" : "disabled");
                dbgfs_pm->lprx_ena = val;
@@ -154,12 +150,6 @@ static ssize_t iwl_dbgfs_pm_params_write(struct ieee80211_vif *vif, char *buf,
                if (sscanf(buf + 16, "%d", &val) != 1)
                        return -EINVAL;
                param = MVM_DEBUGFS_PM_TX_DATA_TIMEOUT;
-       } else if (!strncmp("disable_power_off=", buf, 18) &&
-                  !(mvm->fw->ucode_capa.flags &
-                    IWL_UCODE_TLV_FLAGS_DEVICE_PS_CMD)) {
-               if (sscanf(buf + 18, "%d", &val) != 1)
-                       return -EINVAL;
-               param = MVM_DEBUGFS_PM_DISABLE_POWER_OFF;
        } else if (!strncmp("lprx=", buf, 5)) {
                if (sscanf(buf + 5, "%d", &val) != 1)
                        return -EINVAL;
@@ -592,8 +582,7 @@ void iwl_mvm_vif_dbgfs_register(struct iwl_mvm *mvm, struct ieee80211_vif *vif)
                return;
        }
 
-       if ((mvm->fw->ucode_capa.flags & IWL_UCODE_TLV_FLAGS_PM_CMD_SUPPORT) &&
-           iwlmvm_mod_params.power_scheme != IWL_POWER_SCHEME_CAM &&
+       if (iwlmvm_mod_params.power_scheme != IWL_POWER_SCHEME_CAM &&
            ((vif->type == NL80211_IFTYPE_STATION && !vif->p2p) ||
             (vif->type == NL80211_IFTYPE_STATION && vif->p2p &&
              mvm->fw->ucode_capa.flags & IWL_UCODE_TLV_FLAGS_BSS_P2P_PS_DCM)))
index 1b52deea60812e4f6ac42c94412b1ecf518f44f7..f462c9baa2b569704fdee9bbfa70e76130ad42c1 100644 (file)
@@ -136,9 +136,6 @@ static int iwl_dbgfs_fw_error_dump_open(struct inode *inode, struct file *file)
 
        file->private_data = mvm->fw_error_dump;
        mvm->fw_error_dump = NULL;
-       kfree(mvm->fw_error_sram);
-       mvm->fw_error_sram = NULL;
-       mvm->fw_error_sram_len = 0;
        ret = 0;
 
 out:
@@ -1004,6 +1001,7 @@ static ssize_t iwl_dbgfs_d0i3_refs_read(struct file *file,
        PRINT_MVM_REF(IWL_MVM_REF_P2P_CLIENT);
        PRINT_MVM_REF(IWL_MVM_REF_AP_IBSS);
        PRINT_MVM_REF(IWL_MVM_REF_USER);
+       PRINT_MVM_REF(IWL_MVM_REF_EXIT_WORK);
 
        return simple_read_from_buffer(user_buf, count, ppos, buf, pos);
 }
@@ -1108,9 +1106,9 @@ MVM_DEBUGFS_READ_WRITE_FILE_OPS(scan_ant_rxchain, 8);
 MVM_DEBUGFS_READ_WRITE_FILE_OPS(d0i3_refs, 8);
 
 static const struct file_operations iwl_dbgfs_fw_error_dump_ops = {
-        .open = iwl_dbgfs_fw_error_dump_open,
-        .read = iwl_dbgfs_fw_error_dump_read,
-        .release = iwl_dbgfs_fw_error_dump_release,
+       .open = iwl_dbgfs_fw_error_dump_open,
+       .read = iwl_dbgfs_fw_error_dump_read,
+       .release = iwl_dbgfs_fw_error_dump_release,
 };
 
 #ifdef CONFIG_IWLWIFI_BCAST_FILTERING
@@ -1138,9 +1136,8 @@ int iwl_mvm_dbgfs_register(struct iwl_mvm *mvm, struct dentry *dbgfs_dir)
        MVM_DEBUGFS_ADD_FILE(fw_error_dump, dbgfs_dir, S_IRUSR);
        MVM_DEBUGFS_ADD_FILE(bt_notif, dbgfs_dir, S_IRUSR);
        MVM_DEBUGFS_ADD_FILE(bt_cmd, dbgfs_dir, S_IRUSR);
-       if (mvm->fw->ucode_capa.flags & IWL_UCODE_TLV_FLAGS_DEVICE_PS_CMD)
-               MVM_DEBUGFS_ADD_FILE(disable_power_off, mvm->debugfs_dir,
-                                    S_IRUSR | S_IWUSR);
+       MVM_DEBUGFS_ADD_FILE(disable_power_off, mvm->debugfs_dir,
+                            S_IRUSR | S_IWUSR);
        MVM_DEBUGFS_ADD_FILE(fw_rx_stats, mvm->debugfs_dir, S_IRUSR);
        MVM_DEBUGFS_ADD_FILE(drv_rx_stats, mvm->debugfs_dir, S_IRUSR);
        MVM_DEBUGFS_ADD_FILE(fw_restart, mvm->debugfs_dir, S_IWUSR);
index 21877e5966a8093d6780d3297944c1a214dc55e3..5fe82c29c8ad07bcb7bab43726a4e09b136d8b53 100644 (file)
@@ -141,7 +141,8 @@ enum iwl_bt_coex_lut_type {
        BT_COEX_TX_DIS_LUT,
 
        BT_COEX_MAX_LUT,
-};
+       BT_COEX_INVALID_LUT = 0xff,
+}; /* BT_COEX_DECISION_LUT_INDEX_API_E_VER_1 */
 
 #define BT_COEX_LUT_SIZE (12)
 #define BT_COEX_CORUN_LUT_SIZE (32)
@@ -154,19 +155,23 @@ enum iwl_bt_coex_lut_type {
  * @flags:&enum iwl_bt_coex_flags
  * @max_kill:
  * @bt_reduced_tx_power: enum %iwl_bt_reduced_tx_power
- * @bt4_antenna_isolation:
- * @bt4_antenna_isolation_thr:
- * @bt4_tx_tx_delta_freq_thr:
- * @bt4_tx_rx_max_freq0:
- * @bt_prio_boost:
+ * @override_primary_lut: enum %iwl_bt_coex_lut_type: BT_COEX_INVALID_LUT
+ *     should be set by default
+ * @override_secondary_lut: enum %iwl_bt_coex_lut_type: BT_COEX_INVALID_LUT
+ *     should be set by default
+ * @bt4_antenna_isolation: antenna isolation
+ * @bt4_antenna_isolation_thr: antenna threshold value
+ * @bt4_tx_tx_delta_freq_thr: TxTx delta frequency
+ * @bt4_tx_rx_max_freq0: TxRx max frequency
+ * @bt_prio_boost: BT priority boost registers
  * @wifi_tx_prio_boost: SW boost of wifi tx priority
  * @wifi_rx_prio_boost: SW boost of wifi rx priority
- * @kill_ack_msk:
- * @kill_cts_msk:
- * @decision_lut:
- * @bt4_multiprio_lut:
- * @bt4_corun_lut20:
- * @bt4_corun_lut40:
+ * @kill_ack_msk: kill ACK mask. 1 - Tx ACK, 0 - kill Tx of ACK.
+ * @kill_cts_msk: kill CTS mask. 1 - Tx CTS, 0 - kill Tx of CTS.
+ * @decision_lut: PTA decision LUT, per Prio-Ch
+ * @bt4_multiprio_lut: multi priority LUT configuration
+ * @bt4_corun_lut20: co-running 20 MHz LUT configuration
+ * @bt4_corun_lut40: co-running 40 MHz LUT configuration
  * @valid_bit_msk: enum %iwl_bt_coex_valid_bit_msk
  *
  * The structure is used for the BT_COEX command.
@@ -175,7 +180,8 @@ struct iwl_bt_coex_cmd {
        __le32 flags;
        u8 max_kill;
        u8 bt_reduced_tx_power;
-       u8 reserved[2];
+       u8 override_primary_lut;
+       u8 override_secondary_lut;
 
        u8 bt4_antenna_isolation;
        u8 bt4_antenna_isolation_thr;
@@ -194,7 +200,7 @@ struct iwl_bt_coex_cmd {
        __le32 bt4_corun_lut40[BT_COEX_CORUN_LUT_SIZE];
 
        __le32 valid_bit_msk;
-} __packed; /* BT_COEX_CMD_API_S_VER_3 */
+} __packed; /* BT_COEX_CMD_API_S_VER_5 */
 
 /**
  * struct iwl_bt_coex_ci_cmd - bt coex channel inhibition command
@@ -282,7 +288,7 @@ enum iwl_bt_activity_grading {
        BT_ON_NO_CONNECTION     = 1,
        BT_LOW_TRAFFIC          = 2,
        BT_HIGH_TRAFFIC         = 3,
-};
+}; /* BT_COEX_BT_ACTIVITY_GRADING_API_E_VER_1 */
 
 /**
  * struct iwl_bt_coex_profile_notif - notification about BT coex
@@ -310,7 +316,7 @@ struct iwl_bt_coex_profile_notif {
        __le32 primary_ch_lut;
        __le32 secondary_ch_lut;
        __le32 bt_activity_grading;
-} __packed; /* BT_COEX_PROFILE_NTFY_API_S_VER_2 */
+} __packed; /* BT_COEX_PROFILE_NTFY_API_S_VER_3 */
 
 enum iwl_bt_coex_prio_table_event {
        BT_COEX_PRIO_TBL_EVT_INIT_CALIB1                = 0,
index 10fcc1a79ebddf3087d7de7c2c29389849a425fa..13696fe419b778c68c9d72d7a289a3dd3c453b39 100644 (file)
@@ -345,21 +345,6 @@ enum iwl_wowlan_wakeup_reason {
        IWL_WOWLAN_WAKEUP_BY_REM_WAKE_WAKEUP_PACKET             = BIT(12),
 }; /* WOWLAN_WAKE_UP_REASON_API_E_VER_2 */
 
-struct iwl_wowlan_status_v4 {
-       __le64 replay_ctr;
-       __le16 pattern_number;
-       __le16 non_qos_seq_ctr;
-       __le16 qos_seq_ctr[8];
-       __le32 wakeup_reasons;
-       __le32 rekey_status;
-       __le32 num_of_gtk_rekeys;
-       __le32 transmitted_ndps;
-       __le32 received_beacons;
-       __le32 wake_packet_length;
-       __le32 wake_packet_bufsize;
-       u8 wake_packet[]; /* can be truncated from _length to _bufsize */
-} __packed; /* WOWLAN_STATUSES_API_S_VER_4 */
-
 struct iwl_wowlan_gtk_status {
        u8 key_index;
        u8 reserved[3];
@@ -368,7 +353,7 @@ struct iwl_wowlan_gtk_status {
        struct iwl_wowlan_rsc_tsc_params_cmd rsc;
 } __packed;
 
-struct iwl_wowlan_status_v6 {
+struct iwl_wowlan_status {
        struct iwl_wowlan_gtk_status gtk;
        __le64 replay_ctr;
        __le16 pattern_number;
index 39148b5bb33262596e1dea348c1ae32c9a2c9166..8bb5b94bf9639689fa6445cd046f97eccbcec834 100644 (file)
@@ -334,7 +334,7 @@ enum {
  */
 struct iwl_lq_cmd {
        u8 sta_id;
-       u8 reserved1;
+       u8 reduced_tpc;
        u16 control;
        /* LINK_QUAL_GENERAL_PARAMS_API_S_VER_1 */
        u8 flags;
index 9426905de6b283dc0230cf51d5a694478da7797a..6174c027ff594e30c29c5c263c1efa33e6a5f5e8 100644 (file)
@@ -169,8 +169,12 @@ enum iwl_scan_type {
        SCAN_TYPE_DISCOVERY_FORCED      = 6,
 }; /* SCAN_ACTIVITY_TYPE_E_VER_1 */
 
-/* Maximal number of channels to scan */
-#define MAX_NUM_SCAN_CHANNELS 0x24
+/**
+ * Maximal number of channels to scan
+ * it should be equal to:
+ * max(IWL_NUM_CHANNELS, IWL_NUM_CHANNELS_FAMILY_8000)
+ */
+#define MAX_NUM_SCAN_CHANNELS 50
 
 /**
  * struct iwl_scan_cmd - scan request command
@@ -534,13 +538,16 @@ struct iwl_scan_offload_schedule {
  *
  * IWL_SCAN_OFFLOAD_FLAG_PASS_ALL: pass all results - no filtering.
  * IWL_SCAN_OFFLOAD_FLAG_CACHED_CHANNEL: add cached channels to partial scan.
- * IWL_SCAN_OFFLOAD_FLAG_ENERGY_SCAN: use energy based scan before partial scan
- *     on A band.
+ * IWL_SCAN_OFFLOAD_FLAG_EBS_QUICK_MODE: EBS duration is 100mSec - typical
+ *     beacon period. Finding channel activity in this mode is not guaranteed.
+ * IWL_SCAN_OFFLOAD_FLAG_EBS_ACCURATE_MODE: EBS duration is 200mSec.
+ *     Assuming beacon period is 100ms finding channel activity is guaranteed.
  */
 enum iwl_scan_offload_flags {
        IWL_SCAN_OFFLOAD_FLAG_PASS_ALL          = BIT(0),
        IWL_SCAN_OFFLOAD_FLAG_CACHED_CHANNEL    = BIT(2),
-       IWL_SCAN_OFFLOAD_FLAG_ENERGY_SCAN       = BIT(3),
+       IWL_SCAN_OFFLOAD_FLAG_EBS_QUICK_MODE    = BIT(5),
+       IWL_SCAN_OFFLOAD_FLAG_EBS_ACCURATE_MODE = BIT(6),
 };
 
 /**
@@ -563,17 +570,24 @@ enum iwl_scan_offload_compleate_status {
        IWL_SCAN_OFFLOAD_ABORTED        = 2,
 };
 
+enum iwl_scan_ebs_status {
+       IWL_SCAN_EBS_SUCCESS,
+       IWL_SCAN_EBS_FAILED,
+       IWL_SCAN_EBS_CHAN_NOT_FOUND,
+};
+
 /**
  * iwl_scan_offload_complete - SCAN_OFFLOAD_COMPLETE_NTF_API_S_VER_1
  * @last_schedule_line:                last schedule line executed (fast or regular)
  * @last_schedule_iteration:   last scan iteration executed before scan abort
  * @status:                    enum iwl_scan_offload_compleate_status
+ * @ebs_status: last EBS status, see IWL_SCAN_EBS_*
  */
 struct iwl_scan_offload_complete {
        u8 last_schedule_line;
        u8 last_schedule_iteration;
        u8 status;
-       u8 reserved;
+       u8 ebs_status;
 } __packed;
 
 /**
index d636478672626e9436c12dc2313d3288d94303ed..39cebee8016feaab62f005e5e843447784594429 100644 (file)
@@ -255,22 +255,19 @@ struct iwl_mvm_keyinfo {
 } __packed;
 
 /**
- * struct iwl_mvm_add_sta_cmd_v5 - Add/modify a station in the fw's sta table.
+ * struct iwl_mvm_add_sta_cmd - Add/modify a station in the fw's sta table.
  * ( REPLY_ADD_STA = 0x18 )
  * @add_modify: 1: modify existing, 0: add new station
- * @unicast_tx_key_id: unicast tx key id. Relevant only when unicast key sent
- * @multicast_tx_key_id: multicast tx key id. Relevant only when multicast key
- *     sent
+ * @awake_acs:
+ * @tid_disable_tx: is tid BIT(tid) enabled for Tx. Clear BIT(x) to enable
+ *     AMPDU for tid x. Set %STA_MODIFY_TID_DISABLE_TX to change this field.
  * @mac_id_n_color: the Mac context this station belongs to
  * @addr[ETH_ALEN]: station's MAC address
  * @sta_id: index of station in uCode's station table
  * @modify_mask: STA_MODIFY_*, selects which parameters to modify vs. leave
  *     alone. 1 - modify, 0 - don't change.
- * @key: look at %iwl_mvm_keyinfo
  * @station_flags: look at %iwl_sta_flags
  * @station_flags_msk: what of %station_flags have changed
- * @tid_disable_tx: is tid BIT(tid) enabled for Tx. Clear BIT(x) to enable
- *     AMPDU for tid x. Set %STA_MODIFY_TID_DISABLE_TX to change this field.
  * @add_immediate_ba_tid: tid for which to add block-ack support (Rx)
  *     Set %STA_MODIFY_ADD_BA_TID to use this field, and also set
  *     add_immediate_ba_ssn.
@@ -294,40 +291,7 @@ struct iwl_mvm_keyinfo {
  * ADD_STA sets up the table entry for one station, either creating a new
  * entry, or modifying a pre-existing one.
  */
-struct iwl_mvm_add_sta_cmd_v5 {
-       u8 add_modify;
-       u8 unicast_tx_key_id;
-       u8 multicast_tx_key_id;
-       u8 reserved1;
-       __le32 mac_id_n_color;
-       u8 addr[ETH_ALEN];
-       __le16 reserved2;
-       u8 sta_id;
-       u8 modify_mask;
-       __le16 reserved3;
-       struct iwl_mvm_keyinfo key;
-       __le32 station_flags;
-       __le32 station_flags_msk;
-       __le16 tid_disable_tx;
-       __le16 reserved4;
-       u8 add_immediate_ba_tid;
-       u8 remove_immediate_ba_tid;
-       __le16 add_immediate_ba_ssn;
-       __le16 sleep_tx_count;
-       __le16 sleep_state_flags;
-       __le16 assoc_id;
-       __le16 beamform_flags;
-       __le32 tfd_queue_msk;
-} __packed; /* ADD_STA_CMD_API_S_VER_5 */
-
-/**
- * struct iwl_mvm_add_sta_cmd_v7 - Add / modify a station
- * VER_7 of this command is quite similar to VER_5 except
- * exclusion of all fields related to the security key installation.
- * It only differs from VER_6 by the "awake_acs" field that is
- * reserved and ignored in VER_6.
- */
-struct iwl_mvm_add_sta_cmd_v7 {
+struct iwl_mvm_add_sta_cmd {
        u8 add_modify;
        u8 awake_acs;
        __le16 tid_disable_tx;
index 8e122f3a7a74e8a97914a13d9821a7229bc7c2bc..6cc5f52b807f1bc343ea632674e215423c3abb1d 100644 (file)
@@ -482,7 +482,8 @@ struct iwl_mvm_tx_resp {
        u8 pa_integ_res_b[3];
        u8 pa_integ_res_c[3];
        __le16 measurement_req_id;
-       __le16 reserved;
+       u8 reduced_tpc;
+       u8 reserved;
 
        __le32 tfd_info;
        __le16 seq_ctl;
index 6e75b52588de3ca68a44c41ca339df1e57eae37f..309a9b9a94fecc26918f967e7b9e7a01374d43b3 100644 (file)
@@ -71,6 +71,7 @@
 #include "fw-api-power.h"
 #include "fw-api-d3.h"
 #include "fw-api-coex.h"
+#include "fw-api-scan.h"
 
 /* maximal number of Tx queues in any platform */
 #define IWL_MVM_MAX_QUEUES     20
@@ -604,52 +605,7 @@ enum {
        TE_V1_NOTIF_INTERNAL_FRAG_END = BIT(7),
 }; /* MAC_EVENT_ACTION_API_E_VER_2 */
 
-
-/**
- * struct iwl_time_event_cmd_api_v1 - configuring Time Events
- * with struct MAC_TIME_EVENT_DATA_API_S_VER_1 (see also
- * with version 2. determined by IWL_UCODE_TLV_FLAGS)
- * ( TIME_EVENT_CMD = 0x29 )
- * @id_and_color: ID and color of the relevant MAC
- * @action: action to perform, one of FW_CTXT_ACTION_*
- * @id: this field has two meanings, depending on the action:
- *     If the action is ADD, then it means the type of event to add.
- *     For all other actions it is the unique event ID assigned when the
- *     event was added by the FW.
- * @apply_time: When to start the Time Event (in GP2)
- * @max_delay: maximum delay to event's start (apply time), in TU
- * @depends_on: the unique ID of the event we depend on (if any)
- * @interval: interval between repetitions, in TU
- * @interval_reciprocal: 2^32 / interval
- * @duration: duration of event in TU
- * @repeat: how many repetitions to do, can be TE_REPEAT_ENDLESS
- * @dep_policy: one of TE_V1_INDEPENDENT, TE_V1_DEP_OTHER, TE_V1_DEP_TSF
- *     and TE_V1_EVENT_SOCIOPATHIC
- * @is_present: 0 or 1, are we present or absent during the Time Event
- * @max_frags: maximal number of fragments the Time Event can be divided to
- * @notify: notifications using TE_V1_NOTIF_* (whom to notify when)
- */
-struct iwl_time_event_cmd_v1 {
-       /* COMMON_INDEX_HDR_API_S_VER_1 */
-       __le32 id_and_color;
-       __le32 action;
-       __le32 id;
-       /* MAC_TIME_EVENT_DATA_API_S_VER_1 */
-       __le32 apply_time;
-       __le32 max_delay;
-       __le32 dep_policy;
-       __le32 depends_on;
-       __le32 is_present;
-       __le32 max_frags;
-       __le32 interval;
-       __le32 interval_reciprocal;
-       __le32 duration;
-       __le32 repeat;
-       __le32 notify;
-} __packed; /* MAC_TIME_EVENT_CMD_API_S_VER_1 */
-
-
-/* Time event - defines for command API v2 */
+/* Time event - defines for command API */
 
 /*
  * @TE_V2_FRAG_NONE: fragmentation of the time event is NOT allowed.
@@ -680,7 +636,7 @@ enum {
 #define TE_V2_PLACEMENT_POS    12
 #define TE_V2_ABSENCE_POS      15
 
-/* Time event policy values (for time event cmd api v2)
+/* Time event policy values
  * A notification (both event and fragment) includes a status indicating weather
  * the FW was able to schedule the event or not. For fragment start/end
  * notification the status is always success. There is no start/end fragment
@@ -727,7 +683,7 @@ enum {
 };
 
 /**
- * struct iwl_time_event_cmd_api_v2 - configuring Time Events
+ * struct iwl_time_event_cmd_api - configuring Time Events
  * with struct MAC_TIME_EVENT_DATA_API_S_VER_2 (see also
  * with version 1. determined by IWL_UCODE_TLV_FLAGS)
  * ( TIME_EVENT_CMD = 0x29 )
@@ -750,7 +706,7 @@ enum {
  *     TE_EVENT_SOCIOPATHIC
  *     using TE_ABSENCE and using TE_NOTIF_*
  */
-struct iwl_time_event_cmd_v2 {
+struct iwl_time_event_cmd {
        /* COMMON_INDEX_HDR_API_S_VER_1 */
        __le32 id_and_color;
        __le32 action;
index 58c8941c0d95ef6d6e0d6bb51349663dbf3b93dd..f381908be7e56bfaa648dea3340415badd903393 100644 (file)
  * enum iwl_fw_error_dump_type - types of data in the dump file
  * @IWL_FW_ERROR_DUMP_SRAM:
  * @IWL_FW_ERROR_DUMP_REG:
+ * @IWL_FW_ERROR_DUMP_RXF:
  */
 enum iwl_fw_error_dump_type {
        IWL_FW_ERROR_DUMP_SRAM = 0,
        IWL_FW_ERROR_DUMP_REG = 1,
+       IWL_FW_ERROR_DUMP_RXF = 2,
 
        IWL_FW_ERROR_DUMP_MAX,
 };
@@ -89,7 +91,7 @@ struct iwl_fw_error_dump_data {
        __le32 type;
        __le32 len;
        __u8 data[];
-} __packed __aligned(4);
+} __packed;
 
 /**
  * struct iwl_fw_error_dump_file - the layout of the header of the file
@@ -101,6 +103,6 @@ struct iwl_fw_error_dump_file {
        __le32 barker;
        __le32 file_len;
        u8 data[0];
-} __packed __aligned(4);
+} __packed;
 
 #endif /* __fw_error_dump_h__ */
index 7ce20062f32d443be34fe87865d91afd71a0a014..3d99cf564ba6ee472eac8a3afee40f9a8e692b85 100644 (file)
@@ -288,7 +288,7 @@ int iwl_run_init_mvm_ucode(struct iwl_mvm *mvm, bool read_nvm)
                goto error;
        }
 
-       ret = iwl_send_bt_prio_tbl(mvm);
+       ret = iwl_send_bt_init_conf(mvm);
        if (ret)
                goto error;
 
@@ -424,10 +424,6 @@ int iwl_mvm_up(struct iwl_mvm *mvm)
        if (ret)
                goto error;
 
-       ret = iwl_send_bt_prio_tbl(mvm);
-       if (ret)
-               goto error;
-
        ret = iwl_send_bt_init_conf(mvm);
        if (ret)
                goto error;
@@ -468,12 +464,6 @@ int iwl_mvm_up(struct iwl_mvm *mvm)
        /* Initialize tx backoffs to the minimal possible */
        iwl_mvm_tt_tx_backoff(mvm, 0);
 
-       if (!(mvm->fw->ucode_capa.flags & IWL_UCODE_TLV_FLAGS_PM_CMD_SUPPORT)) {
-               ret = iwl_power_legacy_set_cam_mode(mvm);
-               if (ret)
-                       goto error;
-       }
-
        ret = iwl_mvm_power_update_device(mvm);
        if (ret)
                goto error;
index 9ccec10bba166299cc91cf1706992937127d277a..7110ec2605d667ddd4913fa89427dc05b2a083ea 100644 (file)
@@ -667,12 +667,9 @@ static void iwl_mvm_mac_ctxt_cmd_common(struct iwl_mvm *mvm,
        if (vif->bss_conf.qos)
                cmd->qos_flags |= cpu_to_le32(MAC_QOS_FLG_UPDATE_EDCA);
 
-       /* Don't use cts to self as the fw doesn't support it currently. */
        if (vif->bss_conf.use_cts_prot) {
                cmd->protection_flags |= cpu_to_le32(MAC_PROT_FLG_TGG_PROTECT);
-               if (IWL_UCODE_API(mvm->fw->ucode_ver) >= 8)
-                       cmd->protection_flags |=
-                               cpu_to_le32(MAC_PROT_FLG_SELF_CTS_EN);
+               cmd->protection_flags |= cpu_to_le32(MAC_PROT_FLG_SELF_CTS_EN);
        }
        IWL_DEBUG_RATE(mvm, "use_cts_prot %d, ht_operation_mode %d\n",
                       vif->bss_conf.use_cts_prot,
index f0cebf12c7b8415a3c787d0cc77a9b2b1c2a15ef..97c3deae655273ff3c5184cd86a4722f34714eae 100644 (file)
@@ -276,6 +276,7 @@ int iwl_mvm_mac_setup_register(struct iwl_mvm *mvm)
                    IEEE80211_HW_AMPDU_AGGREGATION |
                    IEEE80211_HW_TIMING_BEACON_ONLY |
                    IEEE80211_HW_CONNECTION_MONITOR |
+                   IEEE80211_HW_SUPPORTS_UAPSD |
                    IEEE80211_HW_SUPPORTS_DYNAMIC_SMPS |
                    IEEE80211_HW_SUPPORTS_STATIC_SMPS;
 
@@ -285,6 +286,8 @@ int iwl_mvm_mac_setup_register(struct iwl_mvm *mvm)
                                    IEEE80211_RADIOTAP_MCS_HAVE_STBC;
        hw->radiotap_vht_details |= IEEE80211_RADIOTAP_VHT_KNOWN_STBC;
        hw->rate_control_algorithm = "iwl-mvm-rs";
+       hw->uapsd_queues = IWL_UAPSD_AC_INFO;
+       hw->uapsd_max_sp_len = IWL_UAPSD_MAX_SP;
 
        /*
         * Enable 11w if advertised by firmware and software crypto
@@ -295,11 +298,9 @@ int iwl_mvm_mac_setup_register(struct iwl_mvm *mvm)
            !iwlwifi_mod_params.sw_crypto)
                hw->flags |= IEEE80211_HW_MFP_CAPABLE;
 
-       if (0 && mvm->fw->ucode_capa.flags & IWL_UCODE_TLV_FLAGS_UAPSD_SUPPORT) {
-               hw->flags |= IEEE80211_HW_SUPPORTS_UAPSD;
-               hw->uapsd_queues = IWL_UAPSD_AC_INFO;
-               hw->uapsd_max_sp_len = IWL_UAPSD_MAX_SP;
-       }
+       /* Disable uAPSD due to firmware issues */
+       if (true)
+               hw->flags &= ~IEEE80211_HW_SUPPORTS_UAPSD;
 
        hw->sta_data_size = sizeof(struct iwl_mvm_sta);
        hw->vif_data_size = sizeof(struct iwl_mvm_vif);
@@ -309,11 +310,8 @@ int iwl_mvm_mac_setup_register(struct iwl_mvm *mvm)
                BIT(NL80211_IFTYPE_P2P_CLIENT) |
                BIT(NL80211_IFTYPE_AP) |
                BIT(NL80211_IFTYPE_P2P_GO) |
-               BIT(NL80211_IFTYPE_P2P_DEVICE);
-
-       /* IBSS has bugs in older versions */
-       if (IWL_UCODE_API(mvm->fw->ucode_ver) >= 8)
-               hw->wiphy->interface_modes |= BIT(NL80211_IFTYPE_ADHOC);
+               BIT(NL80211_IFTYPE_P2P_DEVICE) |
+               BIT(NL80211_IFTYPE_ADHOC);
 
        hw->wiphy->flags |= WIPHY_FLAG_IBSS_RSN;
        hw->wiphy->regulatory_flags |= REGULATORY_CUSTOM_REG |
@@ -365,14 +363,11 @@ int iwl_mvm_mac_setup_register(struct iwl_mvm *mvm)
        else
                hw->wiphy->flags &= ~WIPHY_FLAG_PS_ON_BY_DEFAULT;
 
-       if (mvm->fw->ucode_capa.flags & IWL_UCODE_TLV_FLAGS_SCHED_SCAN) {
-               hw->wiphy->flags |= WIPHY_FLAG_SUPPORTS_SCHED_SCAN;
-               hw->wiphy->max_sched_scan_ssids = PROBE_OPTION_MAX;
-               hw->wiphy->max_match_sets = IWL_SCAN_MAX_PROFILES;
-               /* we create the 802.11 header and zero length SSID IE. */
-               hw->wiphy->max_sched_scan_ie_len =
-                                       SCAN_OFFLOAD_PROBE_REQ_SIZE - 24 - 2;
-       }
+       hw->wiphy->flags |= WIPHY_FLAG_SUPPORTS_SCHED_SCAN;
+       hw->wiphy->max_sched_scan_ssids = PROBE_OPTION_MAX;
+       hw->wiphy->max_match_sets = IWL_SCAN_MAX_PROFILES;
+       /* we create the 802.11 header and zero length SSID IE. */
+       hw->wiphy->max_sched_scan_ie_len = SCAN_OFFLOAD_PROBE_REQ_SIZE - 24 - 2;
 
        hw->wiphy->features |= NL80211_FEATURE_P2P_GO_CTWIN |
                               NL80211_FEATURE_P2P_GO_OPPPS;
@@ -386,7 +381,11 @@ int iwl_mvm_mac_setup_register(struct iwl_mvm *mvm)
        }
 
 #ifdef CONFIG_PM_SLEEP
-       if (mvm->fw->img[IWL_UCODE_WOWLAN].sec[0].len &&
+       if (iwl_mvm_is_d0i3_supported(mvm) &&
+           device_can_wakeup(mvm->trans->dev)) {
+               mvm->wowlan.flags = WIPHY_WOWLAN_ANY;
+               hw->wiphy->wowlan = &mvm->wowlan;
+       } else if (mvm->fw->img[IWL_UCODE_WOWLAN].sec[0].len &&
            mvm->trans->ops->d3_suspend &&
            mvm->trans->ops->d3_resume &&
            device_can_wakeup(mvm->trans->dev)) {
@@ -827,8 +826,7 @@ static int iwl_mvm_mac_add_interface(struct ieee80211_hw *hw,
                goto out_remove_mac;
 
        if (!mvm->bf_allowed_vif &&
-           vif->type == NL80211_IFTYPE_STATION && !vif->p2p &&
-           mvm->fw->ucode_capa.flags & IWL_UCODE_TLV_FLAGS_BF_UPDATED){
+           vif->type == NL80211_IFTYPE_STATION && !vif->p2p) {
                mvm->bf_allowed_vif = mvmvif;
                vif->driver_flags |= IEEE80211_VIF_BEACON_FILTER |
                                     IEEE80211_VIF_SUPPORTS_CQM_RSSI;
@@ -1223,6 +1221,10 @@ static int iwl_mvm_configure_bcast_filter(struct iwl_mvm *mvm,
        if (!(mvm->fw->ucode_capa.flags & IWL_UCODE_TLV_FLAGS_BCAST_FILTERING))
                return 0;
 
+       /* bcast filtering isn't supported for P2P client */
+       if (vif->p2p)
+               return 0;
+
        if (!iwl_mvm_bcast_filter_build_cmd(mvm, &cmd))
                return 0;
 
@@ -1697,6 +1699,11 @@ static int iwl_mvm_mac_sta_state(struct ieee80211_hw *hw,
                ret = iwl_mvm_add_sta(mvm, vif, sta);
        } else if (old_state == IEEE80211_STA_NONE &&
                   new_state == IEEE80211_STA_AUTH) {
+               /*
+                * EBS may be disabled due to previous failures reported by FW.
+                * Reset EBS status here assuming environment has been changed.
+                */
+               mvm->last_ebs_successful = true;
                ret = 0;
        } else if (old_state == IEEE80211_STA_AUTH &&
                   new_state == IEEE80211_STA_ASSOC) {
index d564233a65da6157c1aaf16a099ddf94b3be933e..17c42da5f9f272e253175e42837d665e7e1b3061 100644 (file)
@@ -164,7 +164,6 @@ enum iwl_dbgfs_pm_mask {
        MVM_DEBUGFS_PM_SKIP_DTIM_PERIODS = BIT(2),
        MVM_DEBUGFS_PM_RX_DATA_TIMEOUT = BIT(3),
        MVM_DEBUGFS_PM_TX_DATA_TIMEOUT = BIT(4),
-       MVM_DEBUGFS_PM_DISABLE_POWER_OFF = BIT(5),
        MVM_DEBUGFS_PM_LPRX_ENA = BIT(6),
        MVM_DEBUGFS_PM_LPRX_RSSI_THRESHOLD = BIT(7),
        MVM_DEBUGFS_PM_SNOOZE_ENABLE = BIT(8),
@@ -177,7 +176,6 @@ struct iwl_dbgfs_pm {
        u32 tx_data_timeout;
        bool skip_over_dtim;
        u8 skip_dtim_periods;
-       bool disable_power_off;
        bool lprx_ena;
        u32 lprx_rssi_threshold;
        bool snooze_ena;
@@ -232,6 +230,7 @@ enum iwl_mvm_ref_type {
        IWL_MVM_REF_USER,
        IWL_MVM_REF_TX,
        IWL_MVM_REF_TX_AGG,
+       IWL_MVM_REF_EXIT_WORK,
 
        IWL_MVM_REF_COUNT,
 };
@@ -265,6 +264,7 @@ struct iwl_mvm_vif_bf_data {
  * @uploaded: indicates the MAC context has been added to the device
  * @ap_ibss_active: indicates that AP/IBSS is configured and that the interface
  *     should get quota etc.
+ * @pm_enabled - Indicate if MAC power management is allowed
  * @monitor_active: indicates that monitor context is configured, and that the
  *     interface should get quota etc.
  * @low_latency: indicates that this interface is in low-latency mode
@@ -283,6 +283,7 @@ struct iwl_mvm_vif {
 
        bool uploaded;
        bool ap_ibss_active;
+       bool pm_enabled;
        bool monitor_active;
        bool low_latency;
        struct iwl_mvm_vif_bf_data bf_data;
@@ -451,6 +452,11 @@ struct iwl_mvm_frame_stats {
        int last_frame_idx;
 };
 
+enum {
+       D0I3_DEFER_WAKEUP,
+       D0I3_PENDING_WAKEUP,
+};
+
 struct iwl_mvm {
        /* for logger access */
        struct device *dev;
@@ -535,6 +541,8 @@ struct iwl_mvm {
        /* Internal station */
        struct iwl_mvm_int_sta aux_sta;
 
+       bool last_ebs_successful;
+
        u8 scan_last_antenna_idx; /* to toggle TX between antennas */
        u8 mgmt_last_antenna_idx;
 
@@ -578,6 +586,8 @@ struct iwl_mvm {
        void *fw_error_dump;
        void *fw_error_sram;
        u32 fw_error_sram_len;
+       u32 *fw_error_rxf;
+       u32 fw_error_rxf_len;
 
        struct led_classdev led;
 
@@ -601,6 +611,9 @@ struct iwl_mvm {
        bool d0i3_offloading;
        struct work_struct d0i3_exit_work;
        struct sk_buff_head d0i3_tx;
+       /* protect d0i3_suspend_flags */
+       struct mutex d0i3_suspend_mutex;
+       unsigned long d0i3_suspend_flags;
        /* sync d0i3_tx queue and IWL_MVM_STATUS_IN_D0I3 status flag */
        spinlock_t d0i3_tx_lock;
        wait_queue_head_t d0i3_exit_waitq;
@@ -629,8 +642,6 @@ struct iwl_mvm {
 
        /* Indicate if device power save is allowed */
        bool ps_disabled;
-       /* Indicate if device power management is allowed */
-       bool pm_disabled;
 };
 
 /* Extract MVM priv from op_mode and _hw */
@@ -705,6 +716,7 @@ void iwl_mvm_dump_nic_error_log(struct iwl_mvm *mvm);
 #ifdef CONFIG_IWLWIFI_DEBUGFS
 void iwl_mvm_fw_error_dump(struct iwl_mvm *mvm);
 void iwl_mvm_fw_error_sram_dump(struct iwl_mvm *mvm);
+void iwl_mvm_fw_error_rxf_dump(struct iwl_mvm *mvm);
 #endif
 u8 first_antenna(u8 mask);
 u8 iwl_mvm_next_antenna(struct iwl_mvm *mvm, u8 valid, u8 last_idx);
@@ -874,8 +886,6 @@ void iwl_mvm_update_frame_stats(struct iwl_mvm *mvm,
 int rs_pretty_print_rate(char *buf, const u32 rate);
 
 /* power management */
-int iwl_power_legacy_set_cam_mode(struct iwl_mvm *mvm);
-
 int iwl_mvm_power_update_device(struct iwl_mvm *mvm);
 int iwl_mvm_power_update_mac(struct iwl_mvm *mvm, struct ieee80211_vif *vif);
 int iwl_mvm_power_mac_dbgfs_read(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
@@ -922,9 +932,9 @@ int iwl_mvm_send_proto_offload(struct iwl_mvm *mvm,
 void iwl_mvm_ref(struct iwl_mvm *mvm, enum iwl_mvm_ref_type ref_type);
 void iwl_mvm_unref(struct iwl_mvm *mvm, enum iwl_mvm_ref_type ref_type);
 void iwl_mvm_d0i3_enable_tx(struct iwl_mvm *mvm, __le16 *qos_seq);
+int _iwl_mvm_exit_d0i3(struct iwl_mvm *mvm);
 
 /* BT Coex */
-int iwl_send_bt_prio_tbl(struct iwl_mvm *mvm);
 int iwl_send_bt_init_conf(struct iwl_mvm *mvm);
 int iwl_mvm_rx_bt_coex_notif(struct iwl_mvm *mvm,
                             struct iwl_rx_cmd_buffer *rxb,
@@ -936,6 +946,8 @@ u16 iwl_mvm_coex_agg_time_limit(struct iwl_mvm *mvm,
                                struct ieee80211_sta *sta);
 bool iwl_mvm_bt_coex_is_mimo_allowed(struct iwl_mvm *mvm,
                                     struct ieee80211_sta *sta);
+bool iwl_mvm_bt_coex_is_tpc_allowed(struct iwl_mvm *mvm,
+                                   enum ieee80211_band band);
 u8 iwl_mvm_bt_coex_tx_prio(struct iwl_mvm *mvm, struct ieee80211_hdr *hdr,
                           struct ieee80211_tx_info *info, u8 ac);
 int iwl_mvm_bt_coex_reduced_txp(struct iwl_mvm *mvm, u8 sta_id, bool enable);
index 9545d7fdd4bfc69dfb1fb8c4e07de097d58b6ea7..7a5a8bac5fd0612f0f10e7d34847c39ca2428906 100644 (file)
@@ -402,6 +402,7 @@ iwl_op_mode_mvm_start(struct iwl_trans *trans, const struct iwl_cfg *cfg,
        mvm->sf_state = SF_UNINIT;
 
        mutex_init(&mvm->mutex);
+       mutex_init(&mvm->d0i3_suspend_mutex);
        spin_lock_init(&mvm->async_handlers_lock);
        INIT_LIST_HEAD(&mvm->time_event_list);
        INIT_LIST_HEAD(&mvm->async_handlers_list);
@@ -538,6 +539,7 @@ static void iwl_op_mode_mvm_stop(struct iwl_op_mode *op_mode)
        kfree(mvm->scan_cmd);
        vfree(mvm->fw_error_dump);
        kfree(mvm->fw_error_sram);
+       kfree(mvm->fw_error_rxf);
        kfree(mvm->mcast_filter_cmd);
        mvm->mcast_filter_cmd = NULL;
 
@@ -821,8 +823,9 @@ void iwl_mvm_fw_error_dump(struct iwl_mvm *mvm)
                return;
 
        file_len = mvm->fw_error_sram_len +
+                  mvm->fw_error_rxf_len +
                   sizeof(*dump_file) +
-                  sizeof(*dump_data);
+                  sizeof(*dump_data) * 2;
 
        dump_file = vmalloc(file_len);
        if (!dump_file)
@@ -833,7 +836,12 @@ void iwl_mvm_fw_error_dump(struct iwl_mvm *mvm)
        dump_file->barker = cpu_to_le32(IWL_FW_ERROR_DUMP_BARKER);
        dump_file->file_len = cpu_to_le32(file_len);
        dump_data = (void *)dump_file->data;
-       dump_data->type = IWL_FW_ERROR_DUMP_SRAM;
+       dump_data->type = cpu_to_le32(IWL_FW_ERROR_DUMP_RXF);
+       dump_data->len = cpu_to_le32(mvm->fw_error_rxf_len);
+       memcpy(dump_data->data, mvm->fw_error_rxf, mvm->fw_error_rxf_len);
+
+       dump_data = (void *)((u8 *)dump_data->data + mvm->fw_error_rxf_len);
+       dump_data->type = cpu_to_le32(IWL_FW_ERROR_DUMP_SRAM);
        dump_data->len = cpu_to_le32(mvm->fw_error_sram_len);
 
        /*
@@ -842,6 +850,14 @@ void iwl_mvm_fw_error_dump(struct iwl_mvm *mvm)
         * mvm->fw_error_sram right now.
         */
        memcpy(dump_data->data, mvm->fw_error_sram, mvm->fw_error_sram_len);
+
+       kfree(mvm->fw_error_rxf);
+       mvm->fw_error_rxf = NULL;
+       mvm->fw_error_rxf_len = 0;
+
+       kfree(mvm->fw_error_sram);
+       mvm->fw_error_sram = NULL;
+       mvm->fw_error_sram_len = 0;
 }
 #endif
 
@@ -853,6 +869,7 @@ static void iwl_mvm_nic_error(struct iwl_op_mode *op_mode)
 
 #ifdef CONFIG_IWLWIFI_DEBUGFS
        iwl_mvm_fw_error_sram_dump(mvm);
+       iwl_mvm_fw_error_rxf_dump(mvm);
 #endif
 
        iwl_mvm_nic_restart(mvm);
@@ -1128,7 +1145,7 @@ static void iwl_mvm_d0i3_exit_work(struct work_struct *wk)
                .id = WOWLAN_GET_STATUSES,
                .flags = CMD_SYNC | CMD_HIGH_PRIO | CMD_WANT_SKB,
        };
-       struct iwl_wowlan_status_v6 *status;
+       struct iwl_wowlan_status *status;
        int ret;
        u32 disconnection_reasons, wakeup_reasons;
        __le16 *qos_seq = NULL;
@@ -1158,18 +1175,27 @@ static void iwl_mvm_d0i3_exit_work(struct work_struct *wk)
        iwl_free_resp(&get_status_cmd);
 out:
        iwl_mvm_d0i3_enable_tx(mvm, qos_seq);
+       iwl_mvm_unref(mvm, IWL_MVM_REF_EXIT_WORK);
        mutex_unlock(&mvm->mutex);
 }
 
-static int iwl_mvm_exit_d0i3(struct iwl_op_mode *op_mode)
+int _iwl_mvm_exit_d0i3(struct iwl_mvm *mvm)
 {
-       struct iwl_mvm *mvm = IWL_OP_MODE_GET_MVM(op_mode);
        u32 flags = CMD_ASYNC | CMD_HIGH_PRIO | CMD_SEND_IN_IDLE |
                    CMD_WAKE_UP_TRANS;
        int ret;
 
        IWL_DEBUG_RPM(mvm, "MVM exiting D0i3\n");
 
+       mutex_lock(&mvm->d0i3_suspend_mutex);
+       if (test_bit(D0I3_DEFER_WAKEUP, &mvm->d0i3_suspend_flags)) {
+               IWL_DEBUG_RPM(mvm, "Deferring d0i3 exit until resume\n");
+               __set_bit(D0I3_PENDING_WAKEUP, &mvm->d0i3_suspend_flags);
+               mutex_unlock(&mvm->d0i3_suspend_mutex);
+               return 0;
+       }
+       mutex_unlock(&mvm->d0i3_suspend_mutex);
+
        ret = iwl_mvm_send_cmd_pdu(mvm, D0I3_END_CMD, flags, 0, NULL);
        if (ret)
                goto out;
@@ -1183,6 +1209,25 @@ out:
        return ret;
 }
 
+static int iwl_mvm_exit_d0i3(struct iwl_op_mode *op_mode)
+{
+       struct iwl_mvm *mvm = IWL_OP_MODE_GET_MVM(op_mode);
+
+       iwl_mvm_ref(mvm, IWL_MVM_REF_EXIT_WORK);
+       return _iwl_mvm_exit_d0i3(mvm);
+}
+
+static void iwl_mvm_napi_add(struct iwl_op_mode *op_mode,
+                            struct napi_struct *napi,
+                            struct net_device *napi_dev,
+                            int (*poll)(struct napi_struct *, int),
+                            int weight)
+{
+       struct iwl_mvm *mvm = IWL_OP_MODE_GET_MVM(op_mode);
+
+       ieee80211_napi_add(mvm->hw, napi, napi_dev, poll, weight);
+}
+
 static const struct iwl_op_mode_ops iwl_mvm_ops = {
        .start = iwl_op_mode_mvm_start,
        .stop = iwl_op_mode_mvm_stop,
@@ -1196,4 +1241,5 @@ static const struct iwl_op_mode_ops iwl_mvm_ops = {
        .nic_config = iwl_mvm_nic_config,
        .enter_d0i3 = iwl_mvm_enter_d0i3,
        .exit_d0i3 = iwl_mvm_exit_d0i3,
+       .napi_add = iwl_mvm_napi_add,
 };
index 6b636eab33391cbec4957180efe2e74d2ad07388..78309f7d0b7b8caea2c9443c81ae42e1f299e7ec 100644 (file)
@@ -268,6 +268,30 @@ static void iwl_mvm_power_configure_uapsd(struct iwl_mvm *mvm,
                IWL_MVM_PS_HEAVY_RX_THLD_PERCENT;
 }
 
+static bool iwl_mvm_power_allow_uapsd(struct iwl_mvm *mvm,
+                                      struct ieee80211_vif *vif)
+{
+       struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
+
+       if (!memcmp(mvmvif->uapsd_misbehaving_bssid, vif->bss_conf.bssid,
+                   ETH_ALEN))
+               return false;
+
+       if (vif->p2p &&
+           !(mvm->fw->ucode_capa.flags & IWL_UCODE_TLV_FLAGS_P2P_PS_UAPSD))
+               return false;
+       /*
+        * Avoid using uAPSD if P2P client is associated to GO that uses
+        * opportunistic power save. This is due to current FW limitation.
+        */
+       if (vif->p2p &&
+           (vif->bss_conf.p2p_noa_attr.oppps_ctwindow &
+           IEEE80211_P2P_OPPPS_ENABLE_BIT))
+               return false;
+
+       return true;
+}
+
 static void iwl_mvm_power_build_cmd(struct iwl_mvm *mvm,
                                    struct ieee80211_vif *vif,
                                    struct iwl_mac_power_cmd *cmd)
@@ -280,7 +304,6 @@ static void iwl_mvm_power_build_cmd(struct iwl_mvm *mvm,
        bool radar_detect = false;
        struct iwl_mvm_vif *mvmvif __maybe_unused =
                iwl_mvm_vif_from_mac80211(vif);
-       bool allow_uapsd = true;
 
        cmd->id_and_color = cpu_to_le32(FW_CMD_ID_AND_COLOR(mvmvif->id,
                                                            mvmvif->color));
@@ -303,13 +326,8 @@ static void iwl_mvm_power_build_cmd(struct iwl_mvm *mvm,
 
        cmd->flags |= cpu_to_le16(POWER_FLAGS_POWER_SAVE_ENA_MSK);
 
-#ifdef CONFIG_IWLWIFI_DEBUGFS
-       if (mvmvif->dbgfs_pm.mask & MVM_DEBUGFS_PM_DISABLE_POWER_OFF &&
-           mvmvif->dbgfs_pm.disable_power_off)
-               cmd->flags &= cpu_to_le16(~POWER_FLAGS_POWER_SAVE_ENA_MSK);
-#endif
        if (!vif->bss_conf.ps || iwl_mvm_vif_low_latency(mvmvif) ||
-           mvm->pm_disabled)
+           !mvmvif->pm_enabled)
                return;
 
        cmd->flags |= cpu_to_le16(POWER_FLAGS_POWER_MANAGEMENT_ENA_MSK);
@@ -351,23 +369,7 @@ static void iwl_mvm_power_build_cmd(struct iwl_mvm *mvm,
                        cpu_to_le32(IWL_MVM_WOWLAN_PS_TX_DATA_TIMEOUT);
        }
 
-       if (!memcmp(mvmvif->uapsd_misbehaving_bssid, vif->bss_conf.bssid,
-                   ETH_ALEN))
-               allow_uapsd = false;
-
-       if (vif->p2p &&
-           !(mvm->fw->ucode_capa.flags & IWL_UCODE_TLV_FLAGS_P2P_PS_UAPSD))
-               allow_uapsd = false;
-       /*
-        * Avoid using uAPSD if P2P client is associated to GO that uses
-        * opportunistic power save. This is due to current FW limitation.
-        */
-       if (vif->p2p &&
-           vif->bss_conf.p2p_noa_attr.oppps_ctwindow &
-           IEEE80211_P2P_OPPPS_ENABLE_BIT)
-               allow_uapsd = false;
-
-       if (allow_uapsd)
+       if (iwl_mvm_power_allow_uapsd(mvm, vif))
                iwl_mvm_power_configure_uapsd(mvm, vif, cmd);
 
 #ifdef CONFIG_IWLWIFI_DEBUGFS
@@ -421,13 +423,6 @@ static int iwl_mvm_power_send_cmd(struct iwl_mvm *mvm,
 {
        struct iwl_mac_power_cmd cmd = {};
 
-       if (vif->type != NL80211_IFTYPE_STATION)
-               return 0;
-
-       if (vif->p2p &&
-           !(mvm->fw->ucode_capa.flags & IWL_UCODE_TLV_FLAGS_BSS_P2P_PS_DCM))
-               return 0;
-
        iwl_mvm_power_build_cmd(mvm, vif, &cmd);
        iwl_mvm_power_log(mvm, &cmd);
 #ifdef CONFIG_IWLWIFI_DEBUGFS
@@ -444,12 +439,6 @@ int iwl_mvm_power_update_device(struct iwl_mvm *mvm)
                .flags = cpu_to_le16(DEVICE_POWER_FLAGS_POWER_SAVE_ENA_MSK),
        };
 
-       if (!(mvm->fw->ucode_capa.flags & IWL_UCODE_TLV_FLAGS_PM_CMD_SUPPORT))
-               return 0;
-
-       if (!(mvm->fw->ucode_capa.flags & IWL_UCODE_TLV_FLAGS_DEVICE_PS_CMD))
-               return 0;
-
        if (iwlmvm_mod_params.power_scheme == IWL_POWER_SCHEME_CAM)
                mvm->ps_disabled = true;
 
@@ -508,86 +497,69 @@ int iwl_mvm_power_uapsd_misbehaving_ap_notif(struct iwl_mvm *mvm,
        return 0;
 }
 
-struct iwl_power_constraint {
+struct iwl_power_vifs {
        struct ieee80211_vif *bf_vif;
        struct ieee80211_vif *bss_vif;
        struct ieee80211_vif *p2p_vif;
-       u16 bss_phyctx_id;
-       u16 p2p_phyctx_id;
-       bool pm_disabled;
-       bool ps_disabled;
-       struct iwl_mvm *mvm;
+       struct ieee80211_vif *ap_vif;
+       struct ieee80211_vif *monitor_vif;
+       bool p2p_active;
+       bool bss_active;
+       bool ap_active;
+       bool monitor_active;
 };
 
 static void iwl_mvm_power_iterator(void *_data, u8 *mac,
                                   struct ieee80211_vif *vif)
 {
        struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
-       struct iwl_power_constraint *power_iterator = _data;
-       struct iwl_mvm *mvm = power_iterator->mvm;
+       struct iwl_power_vifs *power_iterator = _data;
 
+       mvmvif->pm_enabled = false;
        switch (ieee80211_vif_type_p2p(vif)) {
        case NL80211_IFTYPE_P2P_DEVICE:
                break;
 
        case NL80211_IFTYPE_P2P_GO:
        case NL80211_IFTYPE_AP:
-               /* no BSS power mgmt if we have an active AP */
-               if (mvmvif->ap_ibss_active)
-                       power_iterator->pm_disabled = true;
+               /* only a single MAC of the same type */
+               WARN_ON(power_iterator->ap_vif);
+               power_iterator->ap_vif = vif;
+               if (mvmvif->phy_ctxt)
+                       if (mvmvif->phy_ctxt->id < MAX_PHYS)
+                               power_iterator->ap_active = true;
                break;
 
        case NL80211_IFTYPE_MONITOR:
-               /* no BSS power mgmt and no device power save */
-               power_iterator->pm_disabled = true;
-               power_iterator->ps_disabled = true;
+               /* only a single MAC of the same type */
+               WARN_ON(power_iterator->monitor_vif);
+               power_iterator->monitor_vif = vif;
+               if (mvmvif->phy_ctxt)
+                       if (mvmvif->phy_ctxt->id < MAX_PHYS)
+                               power_iterator->monitor_active = true;
                break;
 
        case NL80211_IFTYPE_P2P_CLIENT:
-               if (mvmvif->phy_ctxt)
-                       power_iterator->p2p_phyctx_id = mvmvif->phy_ctxt->id;
-
-               /* we should have only one P2P vif */
+               /* only a single MAC of the same type */
                WARN_ON(power_iterator->p2p_vif);
                power_iterator->p2p_vif = vif;
-
-               IWL_DEBUG_POWER(mvm, "p2p: p2p_id=%d, bss_id=%d\n",
-                               power_iterator->p2p_phyctx_id,
-                               power_iterator->bss_phyctx_id);
-               if (!(mvm->fw->ucode_capa.flags &
-                     IWL_UCODE_TLV_FLAGS_BSS_P2P_PS_DCM)) {
-                       /* no BSS power mgmt if we have a P2P client*/
-                       power_iterator->pm_disabled = true;
-               } else if (power_iterator->p2p_phyctx_id < MAX_PHYS &&
-                          power_iterator->bss_phyctx_id < MAX_PHYS &&
-                          power_iterator->p2p_phyctx_id ==
-                          power_iterator->bss_phyctx_id) {
-                       power_iterator->pm_disabled = true;
-               }
+               if (mvmvif->phy_ctxt)
+                       if (mvmvif->phy_ctxt->id < MAX_PHYS)
+                               power_iterator->p2p_active = true;
                break;
 
        case NL80211_IFTYPE_STATION:
-               if (mvmvif->phy_ctxt)
-                       power_iterator->bss_phyctx_id = mvmvif->phy_ctxt->id;
-
-               /* we should have only one BSS vif */
+               /* only a single MAC of the same type */
                WARN_ON(power_iterator->bss_vif);
                power_iterator->bss_vif = vif;
+               if (mvmvif->phy_ctxt)
+                       if (mvmvif->phy_ctxt->id < MAX_PHYS)
+                               power_iterator->bss_active = true;
 
                if (mvmvif->bf_data.bf_enabled &&
                    !WARN_ON(power_iterator->bf_vif))
                        power_iterator->bf_vif = vif;
 
-               IWL_DEBUG_POWER(mvm, "bss: p2p_id=%d, bss_id=%d\n",
-                               power_iterator->p2p_phyctx_id,
-                               power_iterator->bss_phyctx_id);
-               if (mvm->fw->ucode_capa.flags &
-                   IWL_UCODE_TLV_FLAGS_BSS_P2P_PS_DCM &&
-                       (power_iterator->p2p_phyctx_id < MAX_PHYS &&
-                        power_iterator->bss_phyctx_id < MAX_PHYS &&
-                        power_iterator->p2p_phyctx_id ==
-                        power_iterator->bss_phyctx_id))
-                       power_iterator->pm_disabled = true;
                break;
 
        default:
@@ -596,70 +568,118 @@ static void iwl_mvm_power_iterator(void *_data, u8 *mac,
 }
 
 static void
-iwl_mvm_power_get_global_constraint(struct iwl_mvm *mvm,
-                                   struct iwl_power_constraint *constraint)
+iwl_mvm_power_set_pm(struct iwl_mvm *mvm,
+                                   struct iwl_power_vifs *vifs)
 {
-       lockdep_assert_held(&mvm->mutex);
+       struct iwl_mvm_vif *bss_mvmvif = NULL;
+       struct iwl_mvm_vif *p2p_mvmvif = NULL;
+       struct iwl_mvm_vif *ap_mvmvif = NULL;
+       bool client_same_channel = false;
+       bool ap_same_channel = false;
 
-       if (iwlmvm_mod_params.power_scheme == IWL_POWER_SCHEME_CAM) {
-               constraint->pm_disabled = true;
-               constraint->ps_disabled = true;
-       }
+       lockdep_assert_held(&mvm->mutex);
 
+       /* get vifs info + set pm_enable to false */
        ieee80211_iterate_active_interfaces_atomic(mvm->hw,
                                            IEEE80211_IFACE_ITER_NORMAL,
-                                           iwl_mvm_power_iterator, constraint);
+                                           iwl_mvm_power_iterator, vifs);
+
+       if (vifs->bss_vif)
+               bss_mvmvif = iwl_mvm_vif_from_mac80211(vifs->bss_vif);
+
+       if (vifs->p2p_vif)
+               p2p_mvmvif = iwl_mvm_vif_from_mac80211(vifs->p2p_vif);
+
+       if (vifs->ap_vif)
+               ap_mvmvif = iwl_mvm_vif_from_mac80211(vifs->ap_vif);
+
+       /* enable PM on bss if bss stand alone */
+       if (vifs->bss_active && !vifs->p2p_active && !vifs->ap_active) {
+               bss_mvmvif->pm_enabled = true;
+               return;
+       }
+
+       /* enable PM on p2p if p2p stand alone */
+       if (vifs->p2p_active && !vifs->bss_active && !vifs->ap_active) {
+               if (mvm->fw->ucode_capa.flags & IWL_UCODE_TLV_FLAGS_P2P_PM)
+                       p2p_mvmvif->pm_enabled = true;
+               return;
+       }
+
+       if (vifs->bss_active && vifs->p2p_active)
+               client_same_channel = (bss_mvmvif->phy_ctxt->id ==
+                                      p2p_mvmvif->phy_ctxt->id);
+       if (vifs->bss_active && vifs->ap_active)
+               ap_same_channel = (bss_mvmvif->phy_ctxt->id ==
+                                  ap_mvmvif->phy_ctxt->id);
+
+       /* bss is not stand alone: enable PM if alone on its channel */
+       if (vifs->bss_active && !(client_same_channel || ap_same_channel) &&
+           (mvm->fw->ucode_capa.flags & IWL_UCODE_TLV_FLAGS_BSS_P2P_PS_DCM)) {
+                       bss_mvmvif->pm_enabled = true;
+                       return;
+       }
+
+       /*
+        * There is only one channel in the system and there are only
+        * bss and p2p clients that share it
+        */
+       if (client_same_channel && !vifs->ap_active &&
+           (mvm->fw->ucode_capa.flags & IWL_UCODE_TLV_FLAGS_BSS_P2P_PS_SCM)) {
+               /* share same channel*/
+               bss_mvmvif->pm_enabled = true;
+               if (mvm->fw->ucode_capa.flags & IWL_UCODE_TLV_FLAGS_P2P_PM)
+                       p2p_mvmvif->pm_enabled = true;
+       }
 }
 
 int iwl_mvm_power_update_mac(struct iwl_mvm *mvm, struct ieee80211_vif *vif)
 {
-       struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
-       struct iwl_power_constraint constraint = {
-                   .p2p_phyctx_id = MAX_PHYS,
-                   .bss_phyctx_id = MAX_PHYS,
-                   .mvm = mvm,
-       };
+       struct iwl_mvm_vif *mvmvif;
+       struct iwl_power_vifs vifs = {};
        bool ba_enable;
        int ret;
 
        lockdep_assert_held(&mvm->mutex);
 
-       if (!(mvm->fw->ucode_capa.flags & IWL_UCODE_TLV_FLAGS_PM_CMD_SUPPORT))
-               return 0;
-
-       iwl_mvm_power_get_global_constraint(mvm, &constraint);
-       mvm->ps_disabled = constraint.ps_disabled;
-       mvm->pm_disabled = constraint.pm_disabled;
+       iwl_mvm_power_set_pm(mvm, &vifs);
 
+       /* disable PS if CAM */
+       if (iwlmvm_mod_params.power_scheme == IWL_POWER_SCHEME_CAM) {
+               mvm->ps_disabled = true;
+       } else {
        /* don't update device power state unless we add / remove monitor */
-       if (vif->type == NL80211_IFTYPE_MONITOR) {
-               ret = iwl_mvm_power_update_device(mvm);
-               if (ret)
-                       return ret;
+               if (vifs.monitor_vif) {
+                       if (vifs.monitor_active)
+                               mvm->ps_disabled = true;
+                       ret = iwl_mvm_power_update_device(mvm);
+                       if (ret)
+                               return ret;
+               }
        }
 
-       if (constraint.bss_vif) {
-               ret = iwl_mvm_power_send_cmd(mvm, constraint.bss_vif);
+       if (vifs.bss_vif) {
+               ret = iwl_mvm_power_send_cmd(mvm, vifs.bss_vif);
                if (ret)
                        return ret;
        }
 
-       if (constraint.p2p_vif) {
-               ret = iwl_mvm_power_send_cmd(mvm, constraint.p2p_vif);
+       if (vifs.p2p_vif) {
+               ret = iwl_mvm_power_send_cmd(mvm, vifs.p2p_vif);
                if (ret)
                        return ret;
        }
 
-       if (!constraint.bf_vif)
+       if (!vifs.bf_vif)
                return 0;
 
-       vif = constraint.bf_vif;
+       vif = vifs.bf_vif;
        mvmvif = iwl_mvm_vif_from_mac80211(vif);
 
-       ba_enable = !(constraint.pm_disabled || constraint.ps_disabled ||
+       ba_enable = !(!mvmvif->pm_enabled || mvm->ps_disabled ||
                      !vif->bss_conf.ps || iwl_mvm_vif_low_latency(mvmvif));
 
-       return iwl_mvm_update_beacon_abort(mvm, constraint.bf_vif, ba_enable);
+       return iwl_mvm_update_beacon_abort(mvm, vifs.bf_vif, ba_enable);
 }
 
 #ifdef CONFIG_IWLWIFI_DEBUGFS
@@ -671,19 +691,10 @@ int iwl_mvm_power_mac_dbgfs_read(struct iwl_mvm *mvm,
        struct iwl_mac_power_cmd cmd = {};
        int pos = 0;
 
-       if (WARN_ON(!(mvm->fw->ucode_capa.flags &
-                     IWL_UCODE_TLV_FLAGS_PM_CMD_SUPPORT)))
-               return 0;
-
        mutex_lock(&mvm->mutex);
        memcpy(&cmd, &mvmvif->mac_pwr_cmd, sizeof(cmd));
        mutex_unlock(&mvm->mutex);
 
-       if (!(mvm->fw->ucode_capa.flags & IWL_UCODE_TLV_FLAGS_DEVICE_PS_CMD))
-               pos += scnprintf(buf+pos, bufsz-pos, "disable_power_off = %d\n",
-                                (cmd.flags &
-                                cpu_to_le16(POWER_FLAGS_POWER_SAVE_ENA_MSK)) ?
-                                0 : 1);
        pos += scnprintf(buf+pos, bufsz-pos, "power_scheme = %d\n",
                         iwlmvm_mod_params.power_scheme);
        pos += scnprintf(buf+pos, bufsz-pos, "flags = 0x%x\n",
@@ -826,8 +837,7 @@ int iwl_mvm_disable_beacon_filter(struct iwl_mvm *mvm,
        struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
        int ret;
 
-       if (!(mvm->fw->ucode_capa.flags & IWL_UCODE_TLV_FLAGS_BF_UPDATED) ||
-           vif->type != NL80211_IFTYPE_STATION || vif->p2p)
+       if (vif->type != NL80211_IFTYPE_STATION || vif->p2p)
                return 0;
 
        ret = iwl_mvm_beacon_filter_send_cmd(mvm, &cmd, flags);
@@ -914,13 +924,3 @@ int iwl_mvm_update_beacon_filter(struct iwl_mvm *mvm,
 
        return iwl_mvm_enable_beacon_filter(mvm, vif, flags);
 }
-
-int iwl_power_legacy_set_cam_mode(struct iwl_mvm *mvm)
-{
-       struct iwl_powertable_cmd cmd = {
-               .keep_alive_seconds = POWER_KEEP_ALIVE_PERIOD_SEC,
-       };
-
-       return iwl_mvm_send_cmd_pdu(mvm, POWER_TABLE_CMD, CMD_SYNC,
-                                   sizeof(cmd), &cmd);
-}
index 9f52c5b3f0ec0e9b2da5949f2af88bbcd13d89ce..d44b2b33b5ccca71b1f1e6ad0ec6ab2573fdd083 100644 (file)
@@ -527,6 +527,9 @@ static void rs_rate_scale_clear_tbl_windows(struct iwl_mvm *mvm,
        IWL_DEBUG_RATE(mvm, "Clearing up window stats\n");
        for (i = 0; i < IWL_RATE_COUNT; i++)
                rs_rate_scale_clear_window(&tbl->win[i]);
+
+       for (i = 0; i < ARRAY_SIZE(tbl->tpc_win); i++)
+               rs_rate_scale_clear_window(&tbl->tpc_win[i]);
 }
 
 static inline u8 rs_is_valid_ant(u8 valid_antenna, u8 ant_type)
@@ -656,17 +659,34 @@ static int _rs_collect_tx_data(struct iwl_scale_tbl_info *tbl,
        return 0;
 }
 
-static int rs_collect_tx_data(struct iwl_scale_tbl_info *tbl,
-                             int scale_index, int attempts, int successes)
+static int rs_collect_tx_data(struct iwl_lq_sta *lq_sta,
+                             struct iwl_scale_tbl_info *tbl,
+                             int scale_index, int attempts, int successes,
+                             u8 reduced_txp)
 {
        struct iwl_rate_scale_data *window = NULL;
+       int ret;
 
        if (scale_index < 0 || scale_index >= IWL_RATE_COUNT)
                return -EINVAL;
 
+       if (tbl->column != RS_COLUMN_INVALID) {
+               lq_sta->tx_stats[tbl->column][scale_index].total += attempts;
+               lq_sta->tx_stats[tbl->column][scale_index].success += successes;
+       }
+
        /* Select window for current tx bit rate */
        window = &(tbl->win[scale_index]);
 
+       ret = _rs_collect_tx_data(tbl, scale_index, attempts, successes,
+                                 window);
+       if (ret)
+               return ret;
+
+       if (WARN_ON_ONCE(reduced_txp > TPC_MAX_REDUCTION))
+               return -EINVAL;
+
+       window = &tbl->tpc_win[reduced_txp];
        return _rs_collect_tx_data(tbl, scale_index, attempts, successes,
                                   window);
 }
@@ -1000,6 +1020,7 @@ static void rs_tx_status(void *mvm_r, struct ieee80211_supported_band *sband,
        u32 ucode_rate;
        struct rs_rate rate;
        struct iwl_scale_tbl_info *curr_tbl, *other_tbl, *tmp_tbl;
+       u8 reduced_txp = (uintptr_t)info->status.status_driver_data[0];
 
        /* Treat uninitialized rate scaling data same as non-existing. */
        if (!lq_sta) {
@@ -1141,9 +1162,10 @@ static void rs_tx_status(void *mvm_r, struct ieee80211_supported_band *sband,
        if (info->flags & IEEE80211_TX_STAT_AMPDU) {
                ucode_rate = le32_to_cpu(table->rs_table[0]);
                rs_rate_from_ucode_rate(ucode_rate, info->band, &rate);
-               rs_collect_tx_data(curr_tbl, rate.index,
+               rs_collect_tx_data(lq_sta, curr_tbl, rate.index,
                                   info->status.ampdu_len,
-                                  info->status.ampdu_ack_len);
+                                  info->status.ampdu_ack_len,
+                                  reduced_txp);
 
                /* Update success/fail counts if not searching for new mode */
                if (lq_sta->rs_state == RS_STATE_STAY_IN_COLUMN) {
@@ -1176,8 +1198,9 @@ static void rs_tx_status(void *mvm_r, struct ieee80211_supported_band *sband,
                        else
                                continue;
 
-                       rs_collect_tx_data(tmp_tbl, rate.index, 1,
-                                          i < retries ? 0 : legacy_success);
+                       rs_collect_tx_data(lq_sta, tmp_tbl, rate.index, 1,
+                                          i < retries ? 0 : legacy_success,
+                                          reduced_txp);
                }
 
                /* Update success/fail counts if not searching for new mode */
@@ -1188,6 +1211,7 @@ static void rs_tx_status(void *mvm_r, struct ieee80211_supported_band *sband,
        }
        /* The last TX rate is cached in lq_sta; it's set in if/else above */
        lq_sta->last_rate_n_flags = ucode_rate;
+       IWL_DEBUG_RATE(mvm, "reduced txpower: %d\n", reduced_txp);
 done:
        /* See if there's a better rate or modulation mode to try. */
        if (sta && sta->supp_rates[sband->band])
@@ -1769,6 +1793,198 @@ out:
        return action;
 }
 
+static void rs_get_adjacent_txp(struct iwl_mvm *mvm, int index,
+                               int *weaker, int *stronger)
+{
+       *weaker = index + TPC_TX_POWER_STEP;
+       if (*weaker > TPC_MAX_REDUCTION)
+               *weaker = TPC_INVALID;
+
+       *stronger = index - TPC_TX_POWER_STEP;
+       if (*stronger < 0)
+               *stronger = TPC_INVALID;
+}
+
+static bool rs_tpc_allowed(struct iwl_mvm *mvm, struct rs_rate *rate,
+                          enum ieee80211_band band)
+{
+       int index = rate->index;
+
+       /*
+        * allow tpc only if power management is enabled, or bt coex
+        * activity grade allows it and we are on 2.4Ghz.
+        */
+       if (iwlmvm_mod_params.power_scheme == IWL_POWER_SCHEME_CAM &&
+           !iwl_mvm_bt_coex_is_tpc_allowed(mvm, band))
+               return false;
+
+       IWL_DEBUG_RATE(mvm, "check rate, table type: %d\n", rate->type);
+       if (is_legacy(rate))
+               return index == IWL_RATE_54M_INDEX;
+       if (is_ht(rate))
+               return index == IWL_RATE_MCS_7_INDEX;
+       if (is_vht(rate))
+               return index == IWL_RATE_MCS_7_INDEX ||
+                      index == IWL_RATE_MCS_8_INDEX ||
+                      index == IWL_RATE_MCS_9_INDEX;
+
+       WARN_ON_ONCE(1);
+       return false;
+}
+
/* Verdict of the TX power control decision in rs_get_tpc_action(). */
enum tpc_action {
	TPC_ACTION_STAY,		/* keep the current power reduction */
	TPC_ACTION_DECREASE,		/* reduce TX power one more step */
	TPC_ACTION_INCREASE,		/* back the reduction off one step */
	/*
	 * NOTE(review): "RESTIRCTION" is a misspelling of "RESTRICTION",
	 * but the identifier is referenced elsewhere in this file; rename
	 * all uses in a single sweep, not piecemeal.
	 */
	TPC_ACTION_NO_RESTIRCTION,	/* drop all TX power restrictions */
};
+
+static enum tpc_action rs_get_tpc_action(struct iwl_mvm *mvm,
+                                        s32 sr, int weak, int strong,
+                                        int current_tpt,
+                                        int weak_tpt, int strong_tpt)
+{
+       /* stay until we have valid tpt */
+       if (current_tpt == IWL_INVALID_VALUE) {
+               IWL_DEBUG_RATE(mvm, "no current tpt. stay.\n");
+               return TPC_ACTION_STAY;
+       }
+
+       /* Too many failures, increase txp */
+       if (sr <= TPC_SR_FORCE_INCREASE || current_tpt == 0) {
+               IWL_DEBUG_RATE(mvm, "increase txp because of weak SR\n");
+               return TPC_ACTION_NO_RESTIRCTION;
+       }
+
+       /* try decreasing first if applicable */
+       if (weak != TPC_INVALID) {
+               if (weak_tpt == IWL_INVALID_VALUE &&
+                   (strong_tpt == IWL_INVALID_VALUE ||
+                    current_tpt >= strong_tpt)) {
+                       IWL_DEBUG_RATE(mvm,
+                                      "no weak txp measurement. decrease txp\n");
+                       return TPC_ACTION_DECREASE;
+               }
+
+               if (weak_tpt > current_tpt) {
+                       IWL_DEBUG_RATE(mvm,
+                                      "lower txp has better tpt. decrease txp\n");
+                       return TPC_ACTION_DECREASE;
+               }
+       }
+
+       /* next, increase if needed */
+       if (sr < TPC_SR_NO_INCREASE && strong != TPC_INVALID) {
+               if (weak_tpt == IWL_INVALID_VALUE &&
+                   strong_tpt != IWL_INVALID_VALUE &&
+                   current_tpt < strong_tpt) {
+                       IWL_DEBUG_RATE(mvm,
+                                      "higher txp has better tpt. increase txp\n");
+                       return TPC_ACTION_INCREASE;
+               }
+
+               if (weak_tpt < current_tpt &&
+                   (strong_tpt == IWL_INVALID_VALUE ||
+                    strong_tpt > current_tpt)) {
+                       IWL_DEBUG_RATE(mvm,
+                                      "lower txp has worse tpt. increase txp\n");
+                       return TPC_ACTION_INCREASE;
+               }
+       }
+
+       IWL_DEBUG_RATE(mvm, "no need to increase or decrease txp - stay\n");
+       return TPC_ACTION_STAY;
+}
+
+static bool rs_tpc_perform(struct iwl_mvm *mvm,
+                          struct ieee80211_sta *sta,
+                          struct iwl_lq_sta *lq_sta,
+                          struct iwl_scale_tbl_info *tbl)
+{
+       struct iwl_mvm_sta *mvm_sta = (void *)sta->drv_priv;
+       struct ieee80211_vif *vif = mvm_sta->vif;
+       struct ieee80211_chanctx_conf *chanctx_conf;
+       enum ieee80211_band band;
+       struct iwl_rate_scale_data *window;
+       struct rs_rate *rate = &tbl->rate;
+       enum tpc_action action;
+       s32 sr;
+       u8 cur = lq_sta->lq.reduced_tpc;
+       int current_tpt;
+       int weak, strong;
+       int weak_tpt = IWL_INVALID_VALUE, strong_tpt = IWL_INVALID_VALUE;
+
+#ifdef CONFIG_MAC80211_DEBUGFS
+       if (lq_sta->dbg_fixed_txp_reduction <= TPC_MAX_REDUCTION) {
+               IWL_DEBUG_RATE(mvm, "fixed tpc: %d",
+                              lq_sta->dbg_fixed_txp_reduction);
+               lq_sta->lq.reduced_tpc = lq_sta->dbg_fixed_txp_reduction;
+               return cur != lq_sta->dbg_fixed_txp_reduction;
+       }
+#endif
+
+       rcu_read_lock();
+       chanctx_conf = rcu_dereference(vif->chanctx_conf);
+       if (WARN_ON(!chanctx_conf))
+               band = IEEE80211_NUM_BANDS;
+       else
+               band = chanctx_conf->def.chan->band;
+       rcu_read_unlock();
+
+       if (!rs_tpc_allowed(mvm, rate, band)) {
+               IWL_DEBUG_RATE(mvm,
+                              "tpc is not allowed. remove txp restrictions");
+               lq_sta->lq.reduced_tpc = TPC_NO_REDUCTION;
+               return cur != TPC_NO_REDUCTION;
+       }
+
+       rs_get_adjacent_txp(mvm, cur, &weak, &strong);
+
+       /* Collect measured throughputs for current and adjacent rates */
+       window = tbl->tpc_win;
+       sr = window[cur].success_ratio;
+       current_tpt = window[cur].average_tpt;
+       if (weak != TPC_INVALID)
+               weak_tpt = window[weak].average_tpt;
+       if (strong != TPC_INVALID)
+               strong_tpt = window[strong].average_tpt;
+
+       IWL_DEBUG_RATE(mvm,
+                      "(TPC: %d): cur_tpt %d SR %d weak %d strong %d weak_tpt %d strong_tpt %d\n",
+                      cur, current_tpt, sr, weak, strong,
+                      weak_tpt, strong_tpt);
+
+       action = rs_get_tpc_action(mvm, sr, weak, strong,
+                                  current_tpt, weak_tpt, strong_tpt);
+
+       /* override actions if we are on the edge */
+       if (weak == TPC_INVALID && action == TPC_ACTION_DECREASE) {
+               IWL_DEBUG_RATE(mvm, "already in lowest txp, stay");
+               action = TPC_ACTION_STAY;
+       } else if (strong == TPC_INVALID &&
+                  (action == TPC_ACTION_INCREASE ||
+                   action == TPC_ACTION_NO_RESTIRCTION)) {
+               IWL_DEBUG_RATE(mvm, "already in highest txp, stay");
+               action = TPC_ACTION_STAY;
+       }
+
+       switch (action) {
+       case TPC_ACTION_DECREASE:
+               lq_sta->lq.reduced_tpc = weak;
+               return true;
+       case TPC_ACTION_INCREASE:
+               lq_sta->lq.reduced_tpc = strong;
+               return true;
+       case TPC_ACTION_NO_RESTIRCTION:
+               lq_sta->lq.reduced_tpc = TPC_NO_REDUCTION;
+               return true;
+       case TPC_ACTION_STAY:
+               /* do nothing */
+               break;
+       }
+       return false;
+}
+
 /*
  * Do rate scaling and search for new modulation mode.
  */
@@ -2019,6 +2235,8 @@ static void rs_rate_scale_perform(struct iwl_mvm *mvm,
                break;
        case RS_ACTION_STAY:
                /* No change */
+               update_lq = rs_tpc_perform(mvm, sta, lq_sta, tbl);
+               break;
        default:
                break;
        }
@@ -2478,6 +2696,7 @@ void iwl_mvm_rs_rate_init(struct iwl_mvm *mvm, struct ieee80211_sta *sta,
        lq_sta->is_agg = 0;
 #ifdef CONFIG_MAC80211_DEBUGFS
        lq_sta->dbg_fixed_rate = 0;
+       lq_sta->dbg_fixed_txp_reduction = TPC_INVALID;
 #endif
 #ifdef CONFIG_IWLWIFI_DEBUGFS
        iwl_mvm_reset_frame_stats(mvm, &mvm->drv_rx_stats);
@@ -2653,6 +2872,7 @@ static void rs_fill_lq_cmd(struct iwl_mvm *mvm,
                rs_build_rates_table_from_fixed(mvm, lq_cmd,
                                                lq_sta->band,
                                                lq_sta->dbg_fixed_rate);
+               lq_cmd->reduced_tpc = 0;
                ant = (lq_sta->dbg_fixed_rate & RATE_MCS_ANT_ABC_MSK) >>
                        RATE_MCS_ANT_POS;
        } else
@@ -2783,7 +3003,6 @@ static ssize_t rs_sta_dbgfs_scale_table_write(struct file *file,
        size_t buf_size;
        u32 parsed_rate;
 
-
        mvm = lq_sta->drv;
        memset(buf, 0, sizeof(buf));
        buf_size = min(count, sizeof(buf) -  1);
@@ -2856,6 +3075,7 @@ static ssize_t rs_sta_dbgfs_scale_table_read(struct file *file,
                        lq_sta->lq.agg_disable_start_th,
                        lq_sta->lq.agg_frame_cnt_limit);
 
+       desc += sprintf(buff+desc, "reduced tpc=%d\n", lq_sta->lq.reduced_tpc);
        desc += sprintf(buff+desc,
                        "Start idx [0]=0x%x [1]=0x%x [2]=0x%x [3]=0x%x\n",
                        lq_sta->lq.initial_rate_index[0],
@@ -2928,6 +3148,94 @@ static const struct file_operations rs_sta_dbgfs_stats_table_ops = {
        .llseek = default_llseek,
 };
 
+static ssize_t rs_sta_dbgfs_drv_tx_stats_read(struct file *file,
+                                             char __user *user_buf,
+                                             size_t count, loff_t *ppos)
+{
+       static const char * const column_name[] = {
+               [RS_COLUMN_LEGACY_ANT_A] = "LEGACY_ANT_A",
+               [RS_COLUMN_LEGACY_ANT_B] = "LEGACY_ANT_B",
+               [RS_COLUMN_SISO_ANT_A] = "SISO_ANT_A",
+               [RS_COLUMN_SISO_ANT_B] = "SISO_ANT_B",
+               [RS_COLUMN_SISO_ANT_A_SGI] = "SISO_ANT_A_SGI",
+               [RS_COLUMN_SISO_ANT_B_SGI] = "SISO_ANT_B_SGI",
+               [RS_COLUMN_MIMO2] = "MIMO2",
+               [RS_COLUMN_MIMO2_SGI] = "MIMO2_SGI",
+       };
+
+       static const char * const rate_name[] = {
+               [IWL_RATE_1M_INDEX] = "1M",
+               [IWL_RATE_2M_INDEX] = "2M",
+               [IWL_RATE_5M_INDEX] = "5.5M",
+               [IWL_RATE_11M_INDEX] = "11M",
+               [IWL_RATE_6M_INDEX] = "6M|MCS0",
+               [IWL_RATE_9M_INDEX] = "9M",
+               [IWL_RATE_12M_INDEX] = "12M|MCS1",
+               [IWL_RATE_18M_INDEX] = "18M|MCS2",
+               [IWL_RATE_24M_INDEX] = "24M|MCS3",
+               [IWL_RATE_36M_INDEX] = "36M|MCS4",
+               [IWL_RATE_48M_INDEX] = "48M|MCS5",
+               [IWL_RATE_54M_INDEX] = "54M|MCS6",
+               [IWL_RATE_MCS_7_INDEX] = "MCS7",
+               [IWL_RATE_MCS_8_INDEX] = "MCS8",
+               [IWL_RATE_MCS_9_INDEX] = "MCS9",
+       };
+
+       char *buff, *pos, *endpos;
+       int col, rate;
+       ssize_t ret;
+       struct iwl_lq_sta *lq_sta = file->private_data;
+       struct rs_rate_stats *stats;
+       static const size_t bufsz = 1024;
+
+       buff = kmalloc(bufsz, GFP_KERNEL);
+       if (!buff)
+               return -ENOMEM;
+
+       pos = buff;
+       endpos = pos + bufsz;
+
+       pos += scnprintf(pos, endpos - pos, "COLUMN,");
+       for (rate = 0; rate < IWL_RATE_COUNT; rate++)
+               pos += scnprintf(pos, endpos - pos, "%s,", rate_name[rate]);
+       pos += scnprintf(pos, endpos - pos, "\n");
+
+       for (col = 0; col < RS_COLUMN_COUNT; col++) {
+               pos += scnprintf(pos, endpos - pos,
+                                "%s,", column_name[col]);
+
+               for (rate = 0; rate < IWL_RATE_COUNT; rate++) {
+                       stats = &(lq_sta->tx_stats[col][rate]);
+                       pos += scnprintf(pos, endpos - pos,
+                                        "%llu/%llu,",
+                                        stats->success,
+                                        stats->total);
+               }
+               pos += scnprintf(pos, endpos - pos, "\n");
+       }
+
+       ret = simple_read_from_buffer(user_buf, count, ppos, buff, pos - buff);
+       kfree(buff);
+       return ret;
+}
+
+static ssize_t rs_sta_dbgfs_drv_tx_stats_write(struct file *file,
+                                              const char __user *user_buf,
+                                              size_t count, loff_t *ppos)
+{
+       struct iwl_lq_sta *lq_sta = file->private_data;
+       memset(lq_sta->tx_stats, 0, sizeof(lq_sta->tx_stats));
+
+       return count;
+}
+
/* debugfs hooks for the drv_tx_stats file: read dumps CSV, write clears */
static const struct file_operations rs_sta_dbgfs_drv_tx_stats_ops = {
	.read = rs_sta_dbgfs_drv_tx_stats_read,
	.write = rs_sta_dbgfs_drv_tx_stats_write,
	.open = simple_open,
	.llseek = default_llseek,
};
+
 static void rs_add_debugfs(void *mvm, void *mvm_sta, struct dentry *dir)
 {
        struct iwl_lq_sta *lq_sta = mvm_sta;
@@ -2937,9 +3245,15 @@ static void rs_add_debugfs(void *mvm, void *mvm_sta, struct dentry *dir)
        lq_sta->rs_sta_dbgfs_stats_table_file =
                debugfs_create_file("rate_stats_table", S_IRUSR, dir,
                                    lq_sta, &rs_sta_dbgfs_stats_table_ops);
+       lq_sta->rs_sta_dbgfs_drv_tx_stats_file =
+               debugfs_create_file("drv_tx_stats", S_IRUSR | S_IWUSR, dir,
+                                   lq_sta, &rs_sta_dbgfs_drv_tx_stats_ops);
        lq_sta->rs_sta_dbgfs_tx_agg_tid_en_file =
                debugfs_create_u8("tx_agg_tid_enable", S_IRUSR | S_IWUSR, dir,
                                  &lq_sta->tx_agg_tid_en);
+       lq_sta->rs_sta_dbgfs_reduced_txp_file =
+               debugfs_create_u8("reduced_tpc", S_IRUSR | S_IWUSR, dir,
+                                 &lq_sta->dbg_fixed_txp_reduction);
 }
 
 static void rs_remove_debugfs(void *mvm, void *mvm_sta)
@@ -2947,7 +3261,9 @@ static void rs_remove_debugfs(void *mvm, void *mvm_sta)
        struct iwl_lq_sta *lq_sta = mvm_sta;
        debugfs_remove(lq_sta->rs_sta_dbgfs_scale_table_file);
        debugfs_remove(lq_sta->rs_sta_dbgfs_stats_table_file);
+       debugfs_remove(lq_sta->rs_sta_dbgfs_drv_tx_stats_file);
        debugfs_remove(lq_sta->rs_sta_dbgfs_tx_agg_tid_en_file);
+       debugfs_remove(lq_sta->rs_sta_dbgfs_reduced_txp_file);
 }
 #endif
 
index 0acfac96a56c6dca2d2799812231404921abd15b..374a83d7db25a98dd76da34d3fdff9557e48664f 100644 (file)
@@ -158,6 +158,13 @@ enum {
 #define RS_SR_FORCE_DECREASE           1920    /*  15% */
 #define RS_SR_NO_DECREASE              10880   /*  85% */
 
+#define TPC_SR_FORCE_INCREASE          9600    /* 75% */
+#define TPC_SR_NO_INCREASE             10880   /* 85% */
+#define TPC_TX_POWER_STEP              3
+#define TPC_MAX_REDUCTION              15
+#define TPC_NO_REDUCTION               0
+#define TPC_INVALID                    0xff
+
 #define LINK_QUAL_AGG_TIME_LIMIT_DEF   (4000) /* 4 milliseconds */
 #define LINK_QUAL_AGG_TIME_LIMIT_MAX   (8000)
 #define LINK_QUAL_AGG_TIME_LIMIT_MIN   (100)
@@ -266,9 +273,16 @@ enum rs_column {
        RS_COLUMN_MIMO2_SGI,
 
        RS_COLUMN_LAST = RS_COLUMN_MIMO2_SGI,
+       RS_COLUMN_COUNT = RS_COLUMN_LAST + 1,
        RS_COLUMN_INVALID,
 };
 
+/* Packet stats per rate */
+struct rs_rate_stats {
+       u64 success;
+       u64 total;
+};
+
 /**
  * struct iwl_scale_tbl_info -- tx params and success history for all rates
  *
@@ -280,6 +294,8 @@ struct iwl_scale_tbl_info {
        enum rs_column column;
        const u16 *expected_tpt;        /* throughput metrics; expected_tpt_G, etc. */
        struct iwl_rate_scale_data win[IWL_RATE_COUNT]; /* rate histories */
+       /* per txpower-reduction history */
+       struct iwl_rate_scale_data tpc_win[TPC_MAX_REDUCTION + 1];
 };
 
 enum {
@@ -315,6 +331,8 @@ struct iwl_lq_sta {
        bool is_vht;
        enum ieee80211_band band;
 
+       struct rs_rate_stats tx_stats[RS_COLUMN_COUNT][IWL_RATE_COUNT];
+
        /* The following are bitmaps of rates; IWL_RATE_6M_MASK, etc. */
        unsigned long active_legacy_rate;
        unsigned long active_siso_rate;
@@ -334,8 +352,11 @@ struct iwl_lq_sta {
 #ifdef CONFIG_MAC80211_DEBUGFS
        struct dentry *rs_sta_dbgfs_scale_table_file;
        struct dentry *rs_sta_dbgfs_stats_table_file;
+       struct dentry *rs_sta_dbgfs_drv_tx_stats_file;
        struct dentry *rs_sta_dbgfs_tx_agg_tid_en_file;
+       struct dentry *rs_sta_dbgfs_reduced_txp_file;
        u32 dbg_fixed_rate;
+       u8 dbg_fixed_txp_reduction;
 #endif
        struct iwl_mvm *drv;
 
@@ -345,6 +366,9 @@ struct iwl_lq_sta {
        u32 last_rate_n_flags;
        /* packets destined for this STA are aggregated */
        u8 is_agg;
+
+       /* tx power reduction for this sta */
+       int tpc_reduce;
 };
 
 /* Initialize station's rate scaling information after adding station */
index 6061553a5e444956c7b5d626695a2950fb1f3fd1..cf7276967acdec6439392c82e44e94de52d453c8 100644 (file)
@@ -60,7 +60,6 @@
  * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
  *****************************************************************************/
 #include "iwl-trans.h"
-
 #include "mvm.h"
 #include "fw-api.h"
 
@@ -130,42 +129,7 @@ static void iwl_mvm_pass_packet_to_mac80211(struct iwl_mvm *mvm,
 
        memcpy(IEEE80211_SKB_RXCB(skb), stats, sizeof(*stats));
 
-       ieee80211_rx_ni(mvm->hw, skb);
-}
-
-static void iwl_mvm_calc_rssi(struct iwl_mvm *mvm,
-                             struct iwl_rx_phy_info *phy_info,
-                             struct ieee80211_rx_status *rx_status)
-{
-       int rssi_a, rssi_b, rssi_a_dbm, rssi_b_dbm, max_rssi_dbm;
-       u32 agc_a, agc_b;
-       u32 val;
-
-       val = le32_to_cpu(phy_info->non_cfg_phy[IWL_RX_INFO_AGC_IDX]);
-       agc_a = (val & IWL_OFDM_AGC_A_MSK) >> IWL_OFDM_AGC_A_POS;
-       agc_b = (val & IWL_OFDM_AGC_B_MSK) >> IWL_OFDM_AGC_B_POS;
-
-       val = le32_to_cpu(phy_info->non_cfg_phy[IWL_RX_INFO_RSSI_AB_IDX]);
-       rssi_a = (val & IWL_OFDM_RSSI_INBAND_A_MSK) >> IWL_OFDM_RSSI_A_POS;
-       rssi_b = (val & IWL_OFDM_RSSI_INBAND_B_MSK) >> IWL_OFDM_RSSI_B_POS;
-
-       /*
-        * dBm = rssi dB - agc dB - constant.
-        * Higher AGC (higher radio gain) means lower signal.
-        */
-       rssi_a_dbm = rssi_a - IWL_RSSI_OFFSET - agc_a;
-       rssi_b_dbm = rssi_b - IWL_RSSI_OFFSET - agc_b;
-       max_rssi_dbm = max_t(int, rssi_a_dbm, rssi_b_dbm);
-
-       IWL_DEBUG_STATS(mvm, "Rssi In A %d B %d Max %d AGCA %d AGCB %d\n",
-                       rssi_a_dbm, rssi_b_dbm, max_rssi_dbm, agc_a, agc_b);
-
-       rx_status->signal = max_rssi_dbm;
-       rx_status->chains = (le16_to_cpu(phy_info->phy_flags) &
-                               RX_RES_PHY_FLAGS_ANTENNA)
-                                       >> RX_RES_PHY_FLAGS_ANTENNA_POS;
-       rx_status->chain_signal[0] = rssi_a_dbm;
-       rx_status->chain_signal[1] = rssi_b_dbm;
+       ieee80211_rx(mvm->hw, skb);
 }
 
 /*
@@ -337,10 +301,7 @@ int iwl_mvm_rx_rx_mpdu(struct iwl_mvm *mvm, struct iwl_rx_cmd_buffer *rxb,
         */
        /*rx_status.flag |= RX_FLAG_MACTIME_MPDU;*/
 
-       if (mvm->fw->ucode_capa.flags & IWL_UCODE_TLV_FLAGS_RX_ENERGY_API)
-               iwl_mvm_get_signal_strength(mvm, phy_info, &rx_status);
-       else
-               iwl_mvm_calc_rssi(mvm, phy_info, &rx_status);
+       iwl_mvm_get_signal_strength(mvm, phy_info, &rx_status);
 
        IWL_DEBUG_STATS_LIMIT(mvm, "Rssi %d, TSF %llu\n", rx_status.signal,
                              (unsigned long long)rx_status.mactime);
@@ -394,6 +355,8 @@ int iwl_mvm_rx_rx_mpdu(struct iwl_mvm *mvm, struct iwl_rx_cmd_buffer *rxb,
                rx_status.rate_idx = rate_n_flags & RATE_VHT_MCS_RATE_CODE_MSK;
                rx_status.flag |= RX_FLAG_VHT;
                rx_status.flag |= stbc << RX_FLAG_STBC_SHIFT;
+               if (rate_n_flags & RATE_MCS_BF_MSK)
+                       rx_status.vht_flag |= RX_VHT_FLAG_BF;
        } else {
                rx_status.rate_idx =
                        iwl_mvm_legacy_rate_to_mac80211_idx(rate_n_flags,
index c91dc8498852c46653cc43fddb57c382d3d7f3f0..63e7b16edb552be4b626fca9d0b64b064adc6fd2 100644 (file)
@@ -348,7 +348,10 @@ int iwl_mvm_scan_request(struct iwl_mvm *mvm,
        struct iwl_mvm_scan_params params = {};
 
        lockdep_assert_held(&mvm->mutex);
-       BUG_ON(mvm->scan_cmd == NULL);
+
+       /* we should have failed registration if scan_cmd was NULL */
+       if (WARN_ON(mvm->scan_cmd == NULL))
+               return -ENOMEM;
 
        IWL_DEBUG_SCAN(mvm, "Handling mac80211 scan request\n");
        mvm->scan_status = IWL_MVM_SCAN_OS;
@@ -567,9 +570,13 @@ int iwl_mvm_rx_scan_offload_complete_notif(struct iwl_mvm *mvm,
        /* scan status must be locked for proper checking */
        lockdep_assert_held(&mvm->mutex);
 
-       IWL_DEBUG_SCAN(mvm, "Scheduled scan completed, status %s\n",
+       IWL_DEBUG_SCAN(mvm,
+                      "Scheduled scan completed, status %s EBS status %s:%d\n",
                       scan_notif->status == IWL_SCAN_OFFLOAD_COMPLETED ?
-                      "completed" : "aborted");
+                      "completed" : "aborted", scan_notif->ebs_status ==
+                      IWL_SCAN_EBS_SUCCESS ? "success" : "failed",
+                      scan_notif->ebs_status);
+
 
        /* only call mac80211 completion if the stop was initiated by FW */
        if (mvm->scan_status == IWL_MVM_SCAN_SCHED) {
@@ -577,6 +584,8 @@ int iwl_mvm_rx_scan_offload_complete_notif(struct iwl_mvm *mvm,
                ieee80211_sched_scan_stopped(mvm->hw);
        }
 
+       mvm->last_ebs_successful = !scan_notif->ebs_status;
+
        return 0;
 }
 
@@ -913,6 +922,11 @@ int iwl_mvm_sched_scan_start(struct iwl_mvm *mvm,
                scan_req.flags |= cpu_to_le16(IWL_SCAN_OFFLOAD_FLAG_PASS_ALL);
        }
 
+       if (mvm->last_ebs_successful &&
+           mvm->fw->ucode_capa.flags & IWL_UCODE_TLV_FLAGS_EBS_SUPPORT)
+               scan_req.flags |=
+                       cpu_to_le16(IWL_SCAN_OFFLOAD_FLAG_EBS_ACCURATE_MODE);
+
        return iwl_mvm_send_cmd_pdu(mvm, SCAN_OFFLOAD_REQUEST_CMD, CMD_SYNC,
                                    sizeof(scan_req), &scan_req);
 }
index 88809b2d165445fcf9188c8f91bcf755a9e6704f..7edfd15efc9d001f227ea2c35b046c0f47cb55af 100644 (file)
@@ -237,9 +237,6 @@ int iwl_mvm_sf_update(struct iwl_mvm *mvm, struct ieee80211_vif *changed_vif,
                .sta_vif_ap_sta_id = IWL_MVM_STATION_COUNT,
        };
 
-       if (IWL_UCODE_API(mvm->fw->ucode_ver) < 8)
-               return 0;
-
        /*
         * Ignore the call if we are in HW Restart flow, or if the handled
         * vif is a p2p device.
index f339ef8842508774e2ff7d51c9bdce069e014a1b..3e11b9d802e75981ad5731955d9367996a8cec47 100644 (file)
 #include "sta.h"
 #include "rs.h"
 
-static void iwl_mvm_add_sta_cmd_v7_to_v5(struct iwl_mvm_add_sta_cmd_v7 *cmd_v7,
-                                        struct iwl_mvm_add_sta_cmd_v5 *cmd_v5)
-{
-       memset(cmd_v5, 0, sizeof(*cmd_v5));
-
-       cmd_v5->add_modify = cmd_v7->add_modify;
-       cmd_v5->tid_disable_tx = cmd_v7->tid_disable_tx;
-       cmd_v5->mac_id_n_color = cmd_v7->mac_id_n_color;
-       memcpy(cmd_v5->addr, cmd_v7->addr, ETH_ALEN);
-       cmd_v5->sta_id = cmd_v7->sta_id;
-       cmd_v5->modify_mask = cmd_v7->modify_mask;
-       cmd_v5->station_flags = cmd_v7->station_flags;
-       cmd_v5->station_flags_msk = cmd_v7->station_flags_msk;
-       cmd_v5->add_immediate_ba_tid = cmd_v7->add_immediate_ba_tid;
-       cmd_v5->remove_immediate_ba_tid = cmd_v7->remove_immediate_ba_tid;
-       cmd_v5->add_immediate_ba_ssn = cmd_v7->add_immediate_ba_ssn;
-       cmd_v5->sleep_tx_count = cmd_v7->sleep_tx_count;
-       cmd_v5->sleep_state_flags = cmd_v7->sleep_state_flags;
-       cmd_v5->assoc_id = cmd_v7->assoc_id;
-       cmd_v5->beamform_flags = cmd_v7->beamform_flags;
-       cmd_v5->tfd_queue_msk = cmd_v7->tfd_queue_msk;
-}
-
-static void
-iwl_mvm_add_sta_key_to_add_sta_cmd_v5(struct iwl_mvm_add_sta_key_cmd *key_cmd,
-                                     struct iwl_mvm_add_sta_cmd_v5 *sta_cmd,
-                                     u32 mac_id_n_color)
-{
-       memset(sta_cmd, 0, sizeof(*sta_cmd));
-
-       sta_cmd->sta_id = key_cmd->sta_id;
-       sta_cmd->add_modify = STA_MODE_MODIFY;
-       sta_cmd->modify_mask = STA_MODIFY_KEY;
-       sta_cmd->mac_id_n_color = cpu_to_le32(mac_id_n_color);
-
-       sta_cmd->key.key_offset = key_cmd->key_offset;
-       sta_cmd->key.key_flags = key_cmd->key_flags;
-       memcpy(sta_cmd->key.key, key_cmd->key, sizeof(sta_cmd->key.key));
-       sta_cmd->key.tkip_rx_tsc_byte2 = key_cmd->tkip_rx_tsc_byte2;
-       memcpy(sta_cmd->key.tkip_rx_ttak, key_cmd->tkip_rx_ttak,
-              sizeof(sta_cmd->key.tkip_rx_ttak));
-}
-
-static int iwl_mvm_send_add_sta_cmd_status(struct iwl_mvm *mvm,
-                                          struct iwl_mvm_add_sta_cmd_v7 *cmd,
-                                          int *status)
-{
-       struct iwl_mvm_add_sta_cmd_v5 cmd_v5;
-
-       if (mvm->fw->ucode_capa.flags & IWL_UCODE_TLV_FLAGS_STA_KEY_CMD)
-               return iwl_mvm_send_cmd_pdu_status(mvm, ADD_STA, sizeof(*cmd),
-                                                  cmd, status);
-
-       iwl_mvm_add_sta_cmd_v7_to_v5(cmd, &cmd_v5);
-
-       return iwl_mvm_send_cmd_pdu_status(mvm, ADD_STA, sizeof(cmd_v5),
-                                          &cmd_v5, status);
-}
-
-static int iwl_mvm_send_add_sta_cmd(struct iwl_mvm *mvm, u32 flags,
-                                   struct iwl_mvm_add_sta_cmd_v7 *cmd)
-{
-       struct iwl_mvm_add_sta_cmd_v5 cmd_v5;
-
-       if (mvm->fw->ucode_capa.flags & IWL_UCODE_TLV_FLAGS_STA_KEY_CMD)
-               return iwl_mvm_send_cmd_pdu(mvm, ADD_STA, flags,
-                                           sizeof(*cmd), cmd);
-
-       iwl_mvm_add_sta_cmd_v7_to_v5(cmd, &cmd_v5);
-
-       return iwl_mvm_send_cmd_pdu(mvm, ADD_STA, flags, sizeof(cmd_v5),
-                                   &cmd_v5);
-}
-
-static int
-iwl_mvm_send_add_sta_key_cmd_status(struct iwl_mvm *mvm,
-                                   struct iwl_mvm_add_sta_key_cmd *cmd,
-                                   u32 mac_id_n_color,
-                                   int *status)
-{
-       struct iwl_mvm_add_sta_cmd_v5 sta_cmd;
-
-       if (mvm->fw->ucode_capa.flags & IWL_UCODE_TLV_FLAGS_STA_KEY_CMD)
-               return iwl_mvm_send_cmd_pdu_status(mvm, ADD_STA_KEY,
-                                                  sizeof(*cmd), cmd, status);
-
-       iwl_mvm_add_sta_key_to_add_sta_cmd_v5(cmd, &sta_cmd, mac_id_n_color);
-
-       return iwl_mvm_send_cmd_pdu_status(mvm, ADD_STA, sizeof(sta_cmd),
-                                          &sta_cmd, status);
-}
-
-static int iwl_mvm_send_add_sta_key_cmd(struct iwl_mvm *mvm,
-                                       u32 flags,
-                                       struct iwl_mvm_add_sta_key_cmd *cmd,
-                                       u32 mac_id_n_color)
-{
-       struct iwl_mvm_add_sta_cmd_v5 sta_cmd;
-
-       if (mvm->fw->ucode_capa.flags & IWL_UCODE_TLV_FLAGS_STA_KEY_CMD)
-               return iwl_mvm_send_cmd_pdu(mvm, ADD_STA_KEY, flags,
-                                           sizeof(*cmd), cmd);
-
-       iwl_mvm_add_sta_key_to_add_sta_cmd_v5(cmd, &sta_cmd, mac_id_n_color);
-
-       return iwl_mvm_send_cmd_pdu(mvm, ADD_STA, flags, sizeof(sta_cmd),
-                                   &sta_cmd);
-}
-
 static int iwl_mvm_find_free_sta_id(struct iwl_mvm *mvm,
                                    enum nl80211_iftype iftype)
 {
@@ -207,7 +98,7 @@ int iwl_mvm_sta_send_to_fw(struct iwl_mvm *mvm, struct ieee80211_sta *sta,
                           bool update)
 {
        struct iwl_mvm_sta *mvm_sta = (void *)sta->drv_priv;
-       struct iwl_mvm_add_sta_cmd_v7 add_sta_cmd;
+       struct iwl_mvm_add_sta_cmd add_sta_cmd;
        int ret;
        u32 status;
        u32 agg_size = 0, mpdu_dens = 0;
@@ -295,7 +186,8 @@ int iwl_mvm_sta_send_to_fw(struct iwl_mvm *mvm, struct ieee80211_sta *sta,
                cpu_to_le32(mpdu_dens << STA_FLG_AGG_MPDU_DENS_SHIFT);
 
        status = ADD_STA_SUCCESS;
-       ret = iwl_mvm_send_add_sta_cmd_status(mvm, &add_sta_cmd, &status);
+       ret = iwl_mvm_send_cmd_pdu_status(mvm, ADD_STA, sizeof(add_sta_cmd),
+                                         &add_sta_cmd, &status);
        if (ret)
                return ret;
 
@@ -380,7 +272,7 @@ int iwl_mvm_update_sta(struct iwl_mvm *mvm,
 int iwl_mvm_drain_sta(struct iwl_mvm *mvm, struct iwl_mvm_sta *mvmsta,
                      bool drain)
 {
-       struct iwl_mvm_add_sta_cmd_v7 cmd = {};
+       struct iwl_mvm_add_sta_cmd cmd = {};
        int ret;
        u32 status;
 
@@ -393,7 +285,8 @@ int iwl_mvm_drain_sta(struct iwl_mvm *mvm, struct iwl_mvm_sta *mvmsta,
        cmd.station_flags_msk = cpu_to_le32(STA_FLG_DRAIN_FLOW);
 
        status = ADD_STA_SUCCESS;
-       ret = iwl_mvm_send_add_sta_cmd_status(mvm, &cmd, &status);
+       ret = iwl_mvm_send_cmd_pdu_status(mvm, ADD_STA, sizeof(cmd),
+                                         &cmd, &status);
        if (ret)
                return ret;
 
@@ -498,7 +391,7 @@ void iwl_mvm_sta_drained_wk(struct work_struct *wk)
                                sta_id);
                        continue;
                }
-               rcu_assign_pointer(mvm->fw_id_to_mac_id[sta_id], NULL);
+               RCU_INIT_POINTER(mvm->fw_id_to_mac_id[sta_id], NULL);
                clear_bit(sta_id, mvm->sta_drained);
        }
 
@@ -520,14 +413,6 @@ int iwl_mvm_rm_sta(struct iwl_mvm *mvm,
                /* flush its queues here since we are freeing mvm_sta */
                ret = iwl_mvm_flush_tx_path(mvm, mvm_sta->tfd_queue_msk, true);
 
-               /*
-                * Put a non-NULL since the fw station isn't removed.
-                * It will be removed after the MAC will be set as
-                * unassoc.
-                */
-               rcu_assign_pointer(mvm->fw_id_to_mac_id[mvm_sta->sta_id],
-                                  ERR_PTR(-EINVAL));
-
                /* if we are associated - we can't remove the AP STA now */
                if (vif->bss_conf.assoc)
                        return ret;
@@ -557,7 +442,7 @@ int iwl_mvm_rm_sta(struct iwl_mvm *mvm,
        } else {
                spin_unlock_bh(&mvm_sta->lock);
                ret = iwl_mvm_rm_sta_common(mvm, mvm_sta->sta_id);
-               rcu_assign_pointer(mvm->fw_id_to_mac_id[mvm_sta->sta_id], NULL);
+               RCU_INIT_POINTER(mvm->fw_id_to_mac_id[mvm_sta->sta_id], NULL);
        }
 
        return ret;
@@ -571,7 +456,7 @@ int iwl_mvm_rm_sta_id(struct iwl_mvm *mvm,
 
        lockdep_assert_held(&mvm->mutex);
 
-       rcu_assign_pointer(mvm->fw_id_to_mac_id[sta_id], NULL);
+       RCU_INIT_POINTER(mvm->fw_id_to_mac_id[sta_id], NULL);
        return ret;
 }
 
@@ -593,7 +478,7 @@ int iwl_mvm_allocate_int_sta(struct iwl_mvm *mvm, struct iwl_mvm_int_sta *sta,
 
 void iwl_mvm_dealloc_int_sta(struct iwl_mvm *mvm, struct iwl_mvm_int_sta *sta)
 {
-       rcu_assign_pointer(mvm->fw_id_to_mac_id[sta->sta_id], NULL);
+       RCU_INIT_POINTER(mvm->fw_id_to_mac_id[sta->sta_id], NULL);
        memset(sta, 0, sizeof(struct iwl_mvm_int_sta));
        sta->sta_id = IWL_MVM_STATION_COUNT;
 }
@@ -603,13 +488,13 @@ static int iwl_mvm_add_int_sta_common(struct iwl_mvm *mvm,
                                      const u8 *addr,
                                      u16 mac_id, u16 color)
 {
-       struct iwl_mvm_add_sta_cmd_v7 cmd;
+       struct iwl_mvm_add_sta_cmd cmd;
        int ret;
        u32 status;
 
        lockdep_assert_held(&mvm->mutex);
 
-       memset(&cmd, 0, sizeof(struct iwl_mvm_add_sta_cmd_v7));
+       memset(&cmd, 0, sizeof(cmd));
        cmd.sta_id = sta->sta_id;
        cmd.mac_id_n_color = cpu_to_le32(FW_CMD_ID_AND_COLOR(mac_id,
                                                             color));
@@ -619,7 +504,8 @@ static int iwl_mvm_add_int_sta_common(struct iwl_mvm *mvm,
        if (addr)
                memcpy(cmd.addr, addr, ETH_ALEN);
 
-       ret = iwl_mvm_send_add_sta_cmd_status(mvm, &cmd, &status);
+       ret = iwl_mvm_send_cmd_pdu_status(mvm, ADD_STA, sizeof(cmd),
+                                         &cmd, &status);
        if (ret)
                return ret;
 
@@ -753,7 +639,7 @@ int iwl_mvm_sta_rx_agg(struct iwl_mvm *mvm, struct ieee80211_sta *sta,
                       int tid, u16 ssn, bool start)
 {
        struct iwl_mvm_sta *mvm_sta = (void *)sta->drv_priv;
-       struct iwl_mvm_add_sta_cmd_v7 cmd = {};
+       struct iwl_mvm_add_sta_cmd cmd = {};
        int ret;
        u32 status;
 
@@ -777,7 +663,8 @@ int iwl_mvm_sta_rx_agg(struct iwl_mvm *mvm, struct ieee80211_sta *sta,
                                  STA_MODIFY_REMOVE_BA_TID;
 
        status = ADD_STA_SUCCESS;
-       ret = iwl_mvm_send_add_sta_cmd_status(mvm, &cmd, &status);
+       ret = iwl_mvm_send_cmd_pdu_status(mvm, ADD_STA, sizeof(cmd),
+                                         &cmd, &status);
        if (ret)
                return ret;
 
@@ -812,7 +699,7 @@ static int iwl_mvm_sta_tx_agg(struct iwl_mvm *mvm, struct ieee80211_sta *sta,
                              int tid, u8 queue, bool start)
 {
        struct iwl_mvm_sta *mvm_sta = (void *)sta->drv_priv;
-       struct iwl_mvm_add_sta_cmd_v7 cmd = {};
+       struct iwl_mvm_add_sta_cmd cmd = {};
        int ret;
        u32 status;
 
@@ -834,7 +721,8 @@ static int iwl_mvm_sta_tx_agg(struct iwl_mvm *mvm, struct ieee80211_sta *sta,
        cmd.tid_disable_tx = cpu_to_le16(mvm_sta->tid_disable_agg);
 
        status = ADD_STA_SUCCESS;
-       ret = iwl_mvm_send_add_sta_cmd_status(mvm, &cmd, &status);
+       ret = iwl_mvm_send_cmd_pdu_status(mvm, ADD_STA, sizeof(cmd),
+                                         &cmd, &status);
        if (ret)
                return ret;
 
@@ -1129,12 +1017,11 @@ static int iwl_mvm_send_sta_key(struct iwl_mvm *mvm,
                                u8 sta_id, u32 tkip_iv32, u16 *tkip_p1k,
                                u32 cmd_flags)
 {
-       __le16 key_flags;
        struct iwl_mvm_add_sta_key_cmd cmd = {};
+       __le16 key_flags;
        int ret, status;
        u16 keyidx;
        int i;
-       u32 mac_id_n_color = mvm_sta->mac_id_n_color;
 
        keyidx = (keyconf->keyidx << STA_KEY_FLG_KEYID_POS) &
                 STA_KEY_FLG_KEYID_MSK;
@@ -1167,12 +1054,11 @@ static int iwl_mvm_send_sta_key(struct iwl_mvm *mvm,
 
        status = ADD_STA_SUCCESS;
        if (cmd_flags == CMD_SYNC)
-               ret = iwl_mvm_send_add_sta_key_cmd_status(mvm, &cmd,
-                                                         mac_id_n_color,
-                                                         &status);
+               ret = iwl_mvm_send_cmd_pdu_status(mvm, ADD_STA_KEY, sizeof(cmd),
+                                                 &cmd, &status);
        else
-               ret = iwl_mvm_send_add_sta_key_cmd(mvm, CMD_ASYNC, &cmd,
-                                                  mac_id_n_color);
+               ret =  iwl_mvm_send_cmd_pdu(mvm, ADD_STA_KEY, CMD_ASYNC,
+                                           sizeof(cmd), &cmd);
 
        switch (status) {
        case ADD_STA_SUCCESS:
@@ -1399,9 +1285,8 @@ int iwl_mvm_remove_sta_key(struct iwl_mvm *mvm,
        cmd.sta_id = sta_id;
 
        status = ADD_STA_SUCCESS;
-       ret = iwl_mvm_send_add_sta_key_cmd_status(mvm, &cmd,
-                                                 mvm_sta->mac_id_n_color,
-                                                 &status);
+       ret = iwl_mvm_send_cmd_pdu_status(mvm, ADD_STA_KEY, sizeof(cmd),
+                                         &cmd, &status);
 
        switch (status) {
        case ADD_STA_SUCCESS:
@@ -1448,7 +1333,7 @@ void iwl_mvm_sta_modify_ps_wake(struct iwl_mvm *mvm,
                                struct ieee80211_sta *sta)
 {
        struct iwl_mvm_sta *mvmsta = iwl_mvm_sta_from_mac80211(sta);
-       struct iwl_mvm_add_sta_cmd_v7 cmd = {
+       struct iwl_mvm_add_sta_cmd cmd = {
                .add_modify = STA_MODE_MODIFY,
                .sta_id = mvmsta->sta_id,
                .station_flags_msk = cpu_to_le32(STA_FLG_PS),
@@ -1456,7 +1341,7 @@ void iwl_mvm_sta_modify_ps_wake(struct iwl_mvm *mvm,
        };
        int ret;
 
-       ret = iwl_mvm_send_add_sta_cmd(mvm, CMD_ASYNC, &cmd);
+       ret = iwl_mvm_send_cmd_pdu(mvm, ADD_STA, CMD_ASYNC, sizeof(cmd), &cmd);
        if (ret)
                IWL_ERR(mvm, "Failed to send ADD_STA command (%d)\n", ret);
 }
@@ -1468,7 +1353,7 @@ void iwl_mvm_sta_modify_sleep_tx_count(struct iwl_mvm *mvm,
                                       bool agg)
 {
        struct iwl_mvm_sta *mvmsta = iwl_mvm_sta_from_mac80211(sta);
-       struct iwl_mvm_add_sta_cmd_v7 cmd = {
+       struct iwl_mvm_add_sta_cmd cmd = {
                .add_modify = STA_MODE_MODIFY,
                .sta_id = mvmsta->sta_id,
                .modify_mask = STA_MODIFY_SLEEPING_STA_TX_COUNT,
@@ -1538,7 +1423,7 @@ void iwl_mvm_sta_modify_sleep_tx_count(struct iwl_mvm *mvm,
                cmd.sleep_state_flags |= cpu_to_le16(STA_SLEEP_STATE_UAPSD);
        }
 
-       ret = iwl_mvm_send_add_sta_cmd(mvm, CMD_ASYNC, &cmd);
+       ret = iwl_mvm_send_cmd_pdu(mvm, ADD_STA, CMD_ASYNC, sizeof(cmd), &cmd);
        if (ret)
                IWL_ERR(mvm, "Failed to send ADD_STA command (%d)\n", ret);
 }
index 2ed84c421481c79dfa1e847b1940ac78da8a9d9c..e5e3071ff2523a41d45283bec1a3106ac28e8c0b 100644 (file)
@@ -253,6 +253,8 @@ enum iwl_mvm_agg_state {
  *     This is basically (last acked packet++).
  * @rate_n_flags: Rate at which Tx was attempted. Holds the data between the
  *     Tx response (TX_CMD), and the block ack notification (COMPRESSED_BA).
+ * @reduced_tpc: Reduced tx power. Holds the data between the
+ *     Tx response (TX_CMD), and the block ack notification (COMPRESSED_BA).
  * @state: state of the BA agreement establishment / tear down.
  * @txq_id: Tx queue used by the BA session
  * @ssn: the first packet to be sent in AGG HW queue in Tx AGG start flow, or
@@ -265,6 +267,7 @@ struct iwl_mvm_tid_data {
        u16 next_reclaimed;
        /* The rest is Tx AGG related */
        u32 rate_n_flags;
+       u8 reduced_tpc;
        enum iwl_mvm_agg_state state;
        u16 txq_id;
        u16 ssn;
index 61331245ad9324f29ec5a86f12a3239725619673..a9402937f767a3492cb3783e0aee5848d3aa867c 100644 (file)
@@ -273,67 +273,10 @@ static bool iwl_mvm_time_event_response(struct iwl_notif_wait_data *notif_wait,
        return true;
 }
 
-/* used to convert from time event API v2 to v1 */
-#define TE_V2_DEP_POLICY_MSK (TE_V2_DEP_OTHER | TE_V2_DEP_TSF |\
-                            TE_V2_EVENT_SOCIOPATHIC)
-static inline u16 te_v2_get_notify(__le16 policy)
-{
-       return le16_to_cpu(policy) & TE_V2_NOTIF_MSK;
-}
-
-static inline u16 te_v2_get_dep_policy(__le16 policy)
-{
-       return (le16_to_cpu(policy) & TE_V2_DEP_POLICY_MSK) >>
-               TE_V2_PLACEMENT_POS;
-}
-
-static inline u16 te_v2_get_absence(__le16 policy)
-{
-       return (le16_to_cpu(policy) & TE_V2_ABSENCE) >> TE_V2_ABSENCE_POS;
-}
-
-static void iwl_mvm_te_v2_to_v1(const struct iwl_time_event_cmd_v2 *cmd_v2,
-                               struct iwl_time_event_cmd_v1 *cmd_v1)
-{
-       cmd_v1->id_and_color = cmd_v2->id_and_color;
-       cmd_v1->action = cmd_v2->action;
-       cmd_v1->id = cmd_v2->id;
-       cmd_v1->apply_time = cmd_v2->apply_time;
-       cmd_v1->max_delay = cmd_v2->max_delay;
-       cmd_v1->depends_on = cmd_v2->depends_on;
-       cmd_v1->interval = cmd_v2->interval;
-       cmd_v1->duration = cmd_v2->duration;
-       if (cmd_v2->repeat == TE_V2_REPEAT_ENDLESS)
-               cmd_v1->repeat = cpu_to_le32(TE_V1_REPEAT_ENDLESS);
-       else
-               cmd_v1->repeat = cpu_to_le32(cmd_v2->repeat);
-       cmd_v1->max_frags = cpu_to_le32(cmd_v2->max_frags);
-       cmd_v1->interval_reciprocal = 0; /* unused */
-
-       cmd_v1->dep_policy = cpu_to_le32(te_v2_get_dep_policy(cmd_v2->policy));
-       cmd_v1->is_present = cpu_to_le32(!te_v2_get_absence(cmd_v2->policy));
-       cmd_v1->notify = cpu_to_le32(te_v2_get_notify(cmd_v2->policy));
-}
-
-static int iwl_mvm_send_time_event_cmd(struct iwl_mvm *mvm,
-                                      const struct iwl_time_event_cmd_v2 *cmd)
-{
-       struct iwl_time_event_cmd_v1 cmd_v1;
-
-       if (mvm->fw->ucode_capa.flags & IWL_UCODE_TLV_FLAGS_TIME_EVENT_API_V2)
-               return iwl_mvm_send_cmd_pdu(mvm, TIME_EVENT_CMD, CMD_SYNC,
-                                           sizeof(*cmd), cmd);
-
-       iwl_mvm_te_v2_to_v1(cmd, &cmd_v1);
-       return iwl_mvm_send_cmd_pdu(mvm, TIME_EVENT_CMD, CMD_SYNC,
-                                   sizeof(cmd_v1), &cmd_v1);
-}
-
-
 static int iwl_mvm_time_event_send_add(struct iwl_mvm *mvm,
                                       struct ieee80211_vif *vif,
                                       struct iwl_mvm_time_event_data *te_data,
-                                      struct iwl_time_event_cmd_v2 *te_cmd)
+                                      struct iwl_time_event_cmd *te_cmd)
 {
        static const u8 time_event_response[] = { TIME_EVENT_CMD };
        struct iwl_notification_wait wait_time_event;
@@ -369,7 +312,8 @@ static int iwl_mvm_time_event_send_add(struct iwl_mvm *mvm,
                                   ARRAY_SIZE(time_event_response),
                                   iwl_mvm_time_event_response, te_data);
 
-       ret = iwl_mvm_send_time_event_cmd(mvm, te_cmd);
+       ret = iwl_mvm_send_cmd_pdu(mvm, TIME_EVENT_CMD, CMD_SYNC,
+                                           sizeof(*te_cmd), te_cmd);
        if (ret) {
                IWL_ERR(mvm, "Couldn't send TIME_EVENT_CMD: %d\n", ret);
                iwl_remove_notification(&mvm->notif_wait, &wait_time_event);
@@ -397,7 +341,7 @@ void iwl_mvm_protect_session(struct iwl_mvm *mvm,
 {
        struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
        struct iwl_mvm_time_event_data *te_data = &mvmvif->time_event_data;
-       struct iwl_time_event_cmd_v2 time_cmd = {};
+       struct iwl_time_event_cmd time_cmd = {};
 
        lockdep_assert_held(&mvm->mutex);
 
@@ -453,7 +397,7 @@ void iwl_mvm_remove_time_event(struct iwl_mvm *mvm,
                               struct iwl_mvm_vif *mvmvif,
                               struct iwl_mvm_time_event_data *te_data)
 {
-       struct iwl_time_event_cmd_v2 time_cmd = {};
+       struct iwl_time_event_cmd time_cmd = {};
        u32 id, uid;
        int ret;
 
@@ -490,7 +434,8 @@ void iwl_mvm_remove_time_event(struct iwl_mvm *mvm,
                cpu_to_le32(FW_CMD_ID_AND_COLOR(mvmvif->id, mvmvif->color));
 
        IWL_DEBUG_TE(mvm, "Removing TE 0x%x\n", le32_to_cpu(time_cmd.id));
-       ret = iwl_mvm_send_time_event_cmd(mvm, &time_cmd);
+       ret = iwl_mvm_send_cmd_pdu(mvm, TIME_EVENT_CMD, CMD_SYNC,
+                                  sizeof(time_cmd), &time_cmd);
        if (WARN_ON(ret))
                return;
 }
@@ -510,7 +455,7 @@ int iwl_mvm_start_p2p_roc(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
 {
        struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
        struct iwl_mvm_time_event_data *te_data = &mvmvif->time_event_data;
-       struct iwl_time_event_cmd_v2 time_cmd = {};
+       struct iwl_time_event_cmd time_cmd = {};
 
        lockdep_assert_held(&mvm->mutex);
        if (te_data->running) {
index 7a99fa361954e0bc1d5e9e82bf94130b0692ac6f..39a3e03a0acdcc6aa594279906613b6fb53fa343 100644 (file)
@@ -468,13 +468,14 @@ void iwl_mvm_tt_handler(struct iwl_mvm *mvm)
        }
 
        if (params->support_tx_backoff) {
-               tx_backoff = 0;
+               tx_backoff = tt->min_backoff;
                for (i = 0; i < TT_TX_BACKOFF_SIZE; i++) {
                        if (temperature < params->tx_backoff[i].temperature)
                                break;
-                       tx_backoff = params->tx_backoff[i].backoff;
+                       tx_backoff = max(tt->min_backoff,
+                                        params->tx_backoff[i].backoff);
                }
-               if (tx_backoff != 0)
+               if (tx_backoff != tt->min_backoff)
                        throttle_enable = true;
                if (tt->tx_backoff != tx_backoff)
                        iwl_mvm_tt_tx_backoff(mvm, tx_backoff);
@@ -484,7 +485,8 @@ void iwl_mvm_tt_handler(struct iwl_mvm *mvm)
                IWL_WARN(mvm,
                         "Due to high temperature thermal throttling initiated\n");
                tt->throttle = true;
-       } else if (tt->throttle && !tt->dynamic_smps && tt->tx_backoff == 0 &&
+       } else if (tt->throttle && !tt->dynamic_smps &&
+                  tt->tx_backoff == tt->min_backoff &&
                   temperature <= params->tx_protection_exit) {
                IWL_WARN(mvm,
                         "Temperature is back to normal thermal throttling stopped\n");
index 879aeac46cc103112fef914bcc2b38df9f028b06..ff1b630e130eed5ab07a2bbcb222088df410d557 100644 (file)
@@ -636,7 +636,11 @@ static void iwl_mvm_rx_tx_cmd_single(struct iwl_mvm *mvm,
                        seq_ctl = le16_to_cpu(hdr->seq_ctrl);
                }
 
-               ieee80211_tx_status_ni(mvm->hw, skb);
+               BUILD_BUG_ON(ARRAY_SIZE(info->status.status_driver_data) < 1);
+               info->status.status_driver_data[0] =
+                               (void *)(uintptr_t)tx_resp->reduced_tpc;
+
+               ieee80211_tx_status(mvm->hw, skb);
        }
 
        if (txq_id >= mvm->first_agg_queue) {
@@ -815,6 +819,7 @@ static void iwl_mvm_rx_tx_cmd_agg(struct iwl_mvm *mvm,
                struct iwl_mvm_sta *mvmsta = iwl_mvm_sta_from_mac80211(sta);
                mvmsta->tid_data[tid].rate_n_flags =
                        le32_to_cpu(tx_resp->initial_rate);
+               mvmsta->tid_data[tid].reduced_tpc = tx_resp->reduced_tpc;
        }
 
        rcu_read_unlock();
@@ -928,6 +933,8 @@ int iwl_mvm_rx_ba_notif(struct iwl_mvm *mvm, struct iwl_rx_cmd_buffer *rxb,
                        info->status.ampdu_len = ba_notif->txed;
                        iwl_mvm_hwrate_to_tx_status(tid_data->rate_n_flags,
                                                    info);
+                       info->status.status_driver_data[0] =
+                               (void *)(uintptr_t)tid_data->reduced_tpc;
                }
        }
 
@@ -937,7 +944,7 @@ int iwl_mvm_rx_ba_notif(struct iwl_mvm *mvm, struct iwl_rx_cmd_buffer *rxb,
 
        while (!skb_queue_empty(&reclaimed_skbs)) {
                skb = __skb_dequeue(&reclaimed_skbs);
-               ieee80211_tx_status_ni(mvm->hw, skb);
+               ieee80211_tx_status(mvm->hw, skb);
        }
 
        return 0;
index d619851745a19ba6d3bf605555fcdbd5a09f8341..c5f4532cafa94e4eaf3c454a34fa3597da1a66b1 100644 (file)
@@ -64,6 +64,7 @@
 
 #include "iwl-debug.h"
 #include "iwl-io.h"
+#include "iwl-prph.h"
 
 #include "mvm.h"
 #include "fw-api-rs.h"
@@ -469,6 +470,8 @@ void iwl_mvm_dump_nic_error_log(struct iwl_mvm *mvm)
                        mvm->status, table.valid);
        }
 
+       /* Do not change this output - scripts rely on it */
+
        IWL_ERR(mvm, "Loaded firmware version: %s\n", mvm->fw->fw_version);
 
        trace_iwlwifi_dev_ucode_error(trans->dev, table.error_id, table.tsf_low,
@@ -522,7 +525,7 @@ void iwl_mvm_fw_error_sram_dump(struct iwl_mvm *mvm)
        u32 ofs, sram_len;
        void *sram;
 
-       if (!mvm->ucode_loaded || mvm->fw_error_sram)
+       if (!mvm->ucode_loaded || mvm->fw_error_sram || mvm->fw_error_dump)
                return;
 
        img = &mvm->fw->img[mvm->cur_ucode];
@@ -538,6 +541,47 @@ void iwl_mvm_fw_error_sram_dump(struct iwl_mvm *mvm)
        mvm->fw_error_sram_len = sram_len;
 }
 
+void iwl_mvm_fw_error_rxf_dump(struct iwl_mvm *mvm)
+{
+       int i, reg_val;
+       unsigned long flags;
+
+       if (!mvm->ucode_loaded || mvm->fw_error_rxf || mvm->fw_error_dump)
+               return;
+
+       /* reading buffer size */
+       reg_val = iwl_trans_read_prph(mvm->trans, RXF_SIZE_ADDR);
+       mvm->fw_error_rxf_len =
+               (reg_val & RXF_SIZE_BYTE_CNT_MSK) >> RXF_SIZE_BYTE_CND_POS;
+
+       /* the register holds the value divided by 128 */
+       mvm->fw_error_rxf_len = mvm->fw_error_rxf_len << 7;
+
+       if (!mvm->fw_error_rxf_len)
+               return;
+
+       mvm->fw_error_rxf =  kzalloc(mvm->fw_error_rxf_len, GFP_ATOMIC);
+       if (!mvm->fw_error_rxf) {
+               mvm->fw_error_rxf_len = 0;
+               return;
+       }
+
+       if (!iwl_trans_grab_nic_access(mvm->trans, false, &flags)) {
+               kfree(mvm->fw_error_rxf);
+               mvm->fw_error_rxf = NULL;
+               mvm->fw_error_rxf_len = 0;
+               return;
+       }
+
+       for (i = 0; i < (mvm->fw_error_rxf_len / sizeof(u32)); i++) {
+               iwl_trans_write_prph(mvm->trans, RXF_LD_FENCE_OFFSET_ADDR,
+                                    i * sizeof(u32));
+               mvm->fw_error_rxf[i] =
+                       iwl_trans_read_prph(mvm->trans, RXF_FIFO_RD_FENCE_ADDR);
+       }
+       iwl_trans_release_nic_access(mvm->trans, &flags);
+}
+
 /**
  * iwl_mvm_send_lq_cmd() - Send link quality command
  * @init: This command is sent as part of station initialization right
index 9091513ea7388ce11f2294fbb609b3581073e2a0..1b95d856dfd53b7c29bd39fe1060c706b6816a91 100644 (file)
@@ -102,7 +102,7 @@ struct iwl_rxq {
        u32 write_actual;
        struct list_head rx_free;
        struct list_head rx_used;
-       int need_update;
+       bool need_update;
        struct iwl_rb_status *rb_stts;
        dma_addr_t rb_stts_dma;
        spinlock_t lock;
@@ -231,7 +231,7 @@ struct iwl_txq {
        spinlock_t lock;
        struct timer_list stuck_timer;
        struct iwl_trans_pcie *trans_pcie;
-       u8 need_update;
+       bool need_update;
        u8 active;
        bool ampdu;
 };
@@ -270,6 +270,9 @@ struct iwl_trans_pcie {
        struct iwl_trans *trans;
        struct iwl_drv *drv;
 
+       struct net_device napi_dev;
+       struct napi_struct napi;
+
        /* INT ICT Table */
        __le32 *ict_tbl;
        dma_addr_t ict_tbl_dma;
@@ -362,7 +365,7 @@ void iwl_trans_pcie_txq_enable(struct iwl_trans *trans, int txq_id, int fifo,
 void iwl_trans_pcie_txq_disable(struct iwl_trans *trans, int queue);
 int iwl_trans_pcie_tx(struct iwl_trans *trans, struct sk_buff *skb,
                      struct iwl_device_cmd *dev_cmd, int txq_id);
-void iwl_pcie_txq_inc_wr_ptr(struct iwl_trans *trans, struct iwl_txq *txq);
+void iwl_pcie_txq_check_wrptrs(struct iwl_trans *trans);
 int iwl_trans_pcie_send_hcmd(struct iwl_trans *trans, struct iwl_host_cmd *cmd);
 void iwl_pcie_hcmd_complete(struct iwl_trans *trans,
                            struct iwl_rx_cmd_buffer *rxb, int handler_status);
index fdfa3969cac986c1824bd65c41512a9ac4ba7b39..4a26a082a1ba1bf27b8da9a183b4f10e9804a511 100644 (file)
@@ -145,15 +145,13 @@ int iwl_pcie_rx_stop(struct iwl_trans *trans)
 /*
  * iwl_pcie_rxq_inc_wr_ptr - Update the write pointer for the RX queue
  */
-static void iwl_pcie_rxq_inc_wr_ptr(struct iwl_trans *trans,
-                                   struct iwl_rxq *rxq)
+static void iwl_pcie_rxq_inc_wr_ptr(struct iwl_trans *trans)
 {
+       struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
+       struct iwl_rxq *rxq = &trans_pcie->rxq;
        u32 reg;
 
-       spin_lock(&rxq->lock);
-
-       if (rxq->need_update == 0)
-               goto exit_unlock;
+       lockdep_assert_held(&rxq->lock);
 
        /*
         * explicitly wake up the NIC if:
@@ -169,13 +167,27 @@ static void iwl_pcie_rxq_inc_wr_ptr(struct iwl_trans *trans,
                                       reg);
                        iwl_set_bit(trans, CSR_GP_CNTRL,
                                    CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ);
-                       goto exit_unlock;
+                       rxq->need_update = true;
+                       return;
                }
        }
 
        rxq->write_actual = round_down(rxq->write, 8);
        iwl_write32(trans, FH_RSCSR_CHNL0_WPTR, rxq->write_actual);
-       rxq->need_update = 0;
+}
+
+static void iwl_pcie_rxq_check_wrptr(struct iwl_trans *trans)
+{
+       struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
+       struct iwl_rxq *rxq = &trans_pcie->rxq;
+
+       spin_lock(&rxq->lock);
+
+       if (!rxq->need_update)
+               goto exit_unlock;
+
+       iwl_pcie_rxq_inc_wr_ptr(trans);
+       rxq->need_update = false;
 
  exit_unlock:
        spin_unlock(&rxq->lock);
@@ -236,9 +248,8 @@ static void iwl_pcie_rxq_restock(struct iwl_trans *trans)
         * Increment device's write pointer in multiples of 8. */
        if (rxq->write_actual != (rxq->write & ~0x7)) {
                spin_lock(&rxq->lock);
-               rxq->need_update = 1;
+               iwl_pcie_rxq_inc_wr_ptr(trans);
                spin_unlock(&rxq->lock);
-               iwl_pcie_rxq_inc_wr_ptr(trans, rxq);
        }
 }
 
@@ -362,20 +373,9 @@ static void iwl_pcie_rxq_free_rbs(struct iwl_trans *trans)
  * Also restock the Rx queue via iwl_pcie_rxq_restock.
  * This is called as a scheduled work item (except for during initialization)
  */
-static void iwl_pcie_rx_replenish(struct iwl_trans *trans)
-{
-       struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
-
-       iwl_pcie_rxq_alloc_rbs(trans, GFP_KERNEL);
-
-       spin_lock(&trans_pcie->irq_lock);
-       iwl_pcie_rxq_restock(trans);
-       spin_unlock(&trans_pcie->irq_lock);
-}
-
-static void iwl_pcie_rx_replenish_now(struct iwl_trans *trans)
+static void iwl_pcie_rx_replenish(struct iwl_trans *trans, gfp_t gfp)
 {
-       iwl_pcie_rxq_alloc_rbs(trans, GFP_ATOMIC);
+       iwl_pcie_rxq_alloc_rbs(trans, gfp);
 
        iwl_pcie_rxq_restock(trans);
 }
@@ -385,7 +385,7 @@ static void iwl_pcie_rx_replenish_work(struct work_struct *data)
        struct iwl_trans_pcie *trans_pcie =
            container_of(data, struct iwl_trans_pcie, rx_replenish);
 
-       iwl_pcie_rx_replenish(trans_pcie->trans);
+       iwl_pcie_rx_replenish(trans_pcie->trans, GFP_KERNEL);
 }
 
 static int iwl_pcie_rx_alloc(struct iwl_trans *trans)
@@ -521,14 +521,13 @@ int iwl_pcie_rx_init(struct iwl_trans *trans)
        memset(rxq->rb_stts, 0, sizeof(*rxq->rb_stts));
        spin_unlock(&rxq->lock);
 
-       iwl_pcie_rx_replenish(trans);
+       iwl_pcie_rx_replenish(trans, GFP_KERNEL);
 
        iwl_pcie_rx_hw_init(trans, rxq);
 
-       spin_lock(&trans_pcie->irq_lock);
-       rxq->need_update = 1;
-       iwl_pcie_rxq_inc_wr_ptr(trans, rxq);
-       spin_unlock(&trans_pcie->irq_lock);
+       spin_lock(&rxq->lock);
+       iwl_pcie_rxq_inc_wr_ptr(trans);
+       spin_unlock(&rxq->lock);
 
        return 0;
 }
@@ -673,7 +672,6 @@ static void iwl_pcie_rx_handle_rb(struct iwl_trans *trans,
        /* Reuse the page if possible. For notification packets and
         * SKBs that fail to Rx correctly, add them back into the
         * rx_free list for reuse later. */
-       spin_lock(&rxq->lock);
        if (rxb->page != NULL) {
                rxb->page_dma =
                        dma_map_page(trans->dev, rxb->page, 0,
@@ -694,7 +692,6 @@ static void iwl_pcie_rx_handle_rb(struct iwl_trans *trans,
                }
        } else
                list_add_tail(&rxb->list, &rxq->rx_used);
-       spin_unlock(&rxq->lock);
 }
 
 /*
@@ -709,6 +706,8 @@ static void iwl_pcie_rx_handle(struct iwl_trans *trans)
        u32 count = 8;
        int total_empty;
 
+restart:
+       spin_lock(&rxq->lock);
        /* uCode's read index (stored in shared DRAM) indicates the last Rx
         * buffer that the driver may process (last buffer filled by ucode). */
        r = le16_to_cpu(ACCESS_ONCE(rxq->rb_stts->closed_rb_num)) & 0x0FFF;
@@ -743,18 +742,25 @@ static void iwl_pcie_rx_handle(struct iwl_trans *trans)
                        count++;
                        if (count >= 8) {
                                rxq->read = i;
-                               iwl_pcie_rx_replenish_now(trans);
+                               spin_unlock(&rxq->lock);
+                               iwl_pcie_rx_replenish(trans, GFP_ATOMIC);
                                count = 0;
+                               goto restart;
                        }
                }
        }
 
        /* Backtrack one entry */
        rxq->read = i;
+       spin_unlock(&rxq->lock);
+
        if (fill_rx)
-               iwl_pcie_rx_replenish_now(trans);
+               iwl_pcie_rx_replenish(trans, GFP_ATOMIC);
        else
                iwl_pcie_rxq_restock(trans);
+
+       if (trans_pcie->napi.poll)
+               napi_gro_flush(&trans_pcie->napi, false);
 }
 
 /*
@@ -876,7 +882,6 @@ irqreturn_t iwl_pcie_irq_handler(int irq, void *dev_id)
        struct isr_statistics *isr_stats = &trans_pcie->isr_stats;
        u32 inta = 0;
        u32 handled = 0;
-       u32 i;
 
        lock_map_acquire(&trans->sync_cmd_lockdep_map);
 
@@ -1028,9 +1033,8 @@ irqreturn_t iwl_pcie_irq_handler(int irq, void *dev_id)
        /* uCode wakes up after power-down sleep */
        if (inta & CSR_INT_BIT_WAKEUP) {
                IWL_DEBUG_ISR(trans, "Wakeup interrupt\n");
-               iwl_pcie_rxq_inc_wr_ptr(trans, &trans_pcie->rxq);
-               for (i = 0; i < trans->cfg->base_params->num_of_queues; i++)
-                       iwl_pcie_txq_inc_wr_ptr(trans, &trans_pcie->txq[i]);
+               iwl_pcie_rxq_check_wrptr(trans);
+               iwl_pcie_txq_check_wrptrs(trans);
 
                isr_stats->wakeup++;
 
@@ -1068,8 +1072,6 @@ irqreturn_t iwl_pcie_irq_handler(int irq, void *dev_id)
                iwl_write8(trans, CSR_INT_PERIODIC_REG,
                            CSR_INT_PERIODIC_DIS);
 
-               iwl_pcie_rx_handle(trans);
-
                /*
                 * Enable periodic interrupt in 8 msec only if we received
                 * real RX interrupt (instead of just periodic int), to catch
@@ -1082,6 +1084,10 @@ irqreturn_t iwl_pcie_irq_handler(int irq, void *dev_id)
                                   CSR_INT_PERIODIC_ENA);
 
                isr_stats->rx++;
+
+               local_bh_disable();
+               iwl_pcie_rx_handle(trans);
+               local_bh_enable();
        }
 
        /* This "Tx" DMA channel is used only for loading uCode */
index dcfd6d866d095081d7001795c4ec802c3044926f..f98ef1e62eb9795fad48ea56d7251f5f658ef5cd 100644 (file)
@@ -103,7 +103,6 @@ static void iwl_pcie_set_pwr(struct iwl_trans *trans, bool vaux)
 
 /* PCI registers */
 #define PCI_CFG_RETRY_TIMEOUT  0x041
-#define CPU1_CPU2_SEPARATOR_SECTION    0xFFFFCCCC
 
 static void iwl_pcie_apm_config(struct iwl_trans *trans)
 {
@@ -1053,6 +1052,12 @@ static void iwl_trans_pcie_write_prph(struct iwl_trans *trans, u32 addr,
        iwl_trans_pcie_write32(trans, HBUS_TARG_PRPH_WDAT, val);
 }
 
+static int iwl_pcie_dummy_napi_poll(struct napi_struct *napi, int budget)
+{
+       WARN_ON(1);
+       return 0;
+}
+
 static void iwl_trans_pcie_configure(struct iwl_trans *trans,
                                     const struct iwl_trans_config *trans_cfg)
 {
@@ -1079,6 +1084,18 @@ static void iwl_trans_pcie_configure(struct iwl_trans *trans,
 
        trans_pcie->command_names = trans_cfg->command_names;
        trans_pcie->bc_table_dword = trans_cfg->bc_table_dword;
+
+       /* Initialize NAPI here - it should be before registering to mac80211
+        * in the opmode but after the HW struct is allocated.
+        * As this function may be called again in some corner cases don't
+        * do anything if NAPI was already initialized.
+        */
+       if (!trans_pcie->napi.poll && trans->op_mode->ops->napi_add) {
+               init_dummy_netdev(&trans_pcie->napi_dev);
+               iwl_op_mode_napi_add(trans->op_mode, &trans_pcie->napi,
+                                    &trans_pcie->napi_dev,
+                                    iwl_pcie_dummy_napi_poll, 64);
+       }
 }
 
 void iwl_trans_pcie_free(struct iwl_trans *trans)
@@ -1099,6 +1116,9 @@ void iwl_trans_pcie_free(struct iwl_trans *trans)
        pci_disable_device(trans_pcie->pci_dev);
        kmem_cache_destroy(trans->dev_cmd_pool);
 
+       if (trans_pcie->napi.poll)
+               netif_napi_del(&trans_pcie->napi);
+
        kfree(trans);
 }
 
@@ -1237,7 +1257,7 @@ static int iwl_trans_pcie_write_mem(struct iwl_trans *trans, u32 addr,
 
 #define IWL_FLUSH_WAIT_MS      2000
 
-static int iwl_trans_pcie_wait_txq_empty(struct iwl_trans *trans)
+static int iwl_trans_pcie_wait_txq_empty(struct iwl_trans *trans, u32 txq_bm)
 {
        struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
        struct iwl_txq *txq;
@@ -1250,13 +1270,31 @@ static int iwl_trans_pcie_wait_txq_empty(struct iwl_trans *trans)
 
        /* waiting for all the tx frames complete might take a while */
        for (cnt = 0; cnt < trans->cfg->base_params->num_of_queues; cnt++) {
+               u8 wr_ptr;
+
                if (cnt == trans_pcie->cmd_queue)
                        continue;
+               if (!test_bit(cnt, trans_pcie->queue_used))
+                       continue;
+               if (!(BIT(cnt) & txq_bm))
+                       continue;
+
+               IWL_DEBUG_TX_QUEUES(trans, "Emptying queue %d...\n", cnt);
                txq = &trans_pcie->txq[cnt];
                q = &txq->q;
-               while (q->read_ptr != q->write_ptr && !time_after(jiffies,
-                      now + msecs_to_jiffies(IWL_FLUSH_WAIT_MS)))
+               wr_ptr = ACCESS_ONCE(q->write_ptr);
+
+               while (q->read_ptr != ACCESS_ONCE(q->write_ptr) &&
+                      !time_after(jiffies,
+                                  now + msecs_to_jiffies(IWL_FLUSH_WAIT_MS))) {
+                       u8 write_ptr = ACCESS_ONCE(q->write_ptr);
+
+                       if (WARN_ONCE(wr_ptr != write_ptr,
+                                     "WR pointer moved while flushing %d -> %d\n",
+                                     wr_ptr, write_ptr))
+                               return -ETIMEDOUT;
                        msleep(1);
+               }
 
                if (q->read_ptr != q->write_ptr) {
                        IWL_ERR(trans,
@@ -1264,6 +1302,7 @@ static int iwl_trans_pcie_wait_txq_empty(struct iwl_trans *trans)
                        ret = -ETIMEDOUT;
                        break;
                }
+               IWL_DEBUG_TX_QUEUES(trans, "Queue %d is now empty.\n", cnt);
        }
 
        if (!ret)
index 3b0c72c1005446d2d93f7eb15ed6d3207e1c3ea1..dde6031f4257f0b3a8fd8d46e5d5fe712f3a4ea0 100644 (file)
@@ -287,14 +287,14 @@ static void iwl_pcie_txq_inval_byte_cnt_tbl(struct iwl_trans *trans,
 /*
  * iwl_pcie_txq_inc_wr_ptr - Send new write index to hardware
  */
-void iwl_pcie_txq_inc_wr_ptr(struct iwl_trans *trans, struct iwl_txq *txq)
+static void iwl_pcie_txq_inc_wr_ptr(struct iwl_trans *trans,
+                                   struct iwl_txq *txq)
 {
        struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
        u32 reg = 0;
        int txq_id = txq->q.id;
 
-       if (txq->need_update == 0)
-               return;
+       lockdep_assert_held(&txq->lock);
 
        /*
         * explicitly wake up the NIC if:
@@ -317,6 +317,7 @@ void iwl_pcie_txq_inc_wr_ptr(struct iwl_trans *trans, struct iwl_txq *txq)
                                       txq_id, reg);
                        iwl_set_bit(trans, CSR_GP_CNTRL,
                                    CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ);
+                       txq->need_update = true;
                        return;
                }
        }
@@ -327,8 +328,23 @@ void iwl_pcie_txq_inc_wr_ptr(struct iwl_trans *trans, struct iwl_txq *txq)
         */
        IWL_DEBUG_TX(trans, "Q:%d WR: 0x%x\n", txq_id, txq->q.write_ptr);
        iwl_write32(trans, HBUS_TARG_WRPTR, txq->q.write_ptr | (txq_id << 8));
+}
+
+void iwl_pcie_txq_check_wrptrs(struct iwl_trans *trans)
+{
+       struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
+       int i;
 
-       txq->need_update = 0;
+       for (i = 0; i < trans->cfg->base_params->num_of_queues; i++) {
+               struct iwl_txq *txq = &trans_pcie->txq[i];
+
+               spin_lock(&txq->lock);
+               if (trans_pcie->txq[i].need_update) {
+                       iwl_pcie_txq_inc_wr_ptr(trans, txq);
+                       trans_pcie->txq[i].need_update = false;
+               }
+               spin_unlock(&txq->lock);
+       }
 }
 
 static inline dma_addr_t iwl_pcie_tfd_tb_get_addr(struct iwl_tfd *tfd, u8 idx)
@@ -542,7 +558,7 @@ static int iwl_pcie_txq_init(struct iwl_trans *trans, struct iwl_txq *txq,
 {
        int ret;
 
-       txq->need_update = 0;
+       txq->need_update = false;
 
        /* TFD_QUEUE_SIZE_MAX must be power-of-two size, otherwise
         * iwl_queue_inc_wrap and iwl_queue_dec_wrap are broken. */
@@ -680,7 +696,8 @@ void iwl_pcie_tx_start(struct iwl_trans *trans, u32 scd_base_addr)
        /* The chain extension of the SCD doesn't work well. This feature is
         * enabled by default by the HW, so we need to disable it manually.
         */
-       iwl_write_prph(trans, SCD_CHAINEXT_EN, 0);
+       if (trans->cfg->base_params->scd_chain_ext_wa)
+               iwl_write_prph(trans, SCD_CHAINEXT_EN, 0);
 
        iwl_trans_ac_txq_enable(trans, trans_pcie->cmd_queue,
                                trans_pcie->cmd_fifo);
@@ -1028,7 +1045,8 @@ static void iwl_pcie_cmdq_reclaim(struct iwl_trans *trans, int txq_id, int idx)
                }
        }
 
-       if (q->read_ptr == q->write_ptr) {
+       if (trans->cfg->base_params->apmg_wake_up_wa &&
+           q->read_ptr == q->write_ptr) {
                spin_lock_irqsave(&trans_pcie->reg_lock, flags);
                WARN_ON(!trans_pcie->cmd_in_flight);
                trans_pcie->cmd_in_flight = false;
@@ -1392,8 +1410,6 @@ static int iwl_pcie_enqueue_hcmd(struct iwl_trans *trans,
                kfree(txq->entries[idx].free_buf);
        txq->entries[idx].free_buf = dup_buf;
 
-       txq->need_update = 1;
-
        trace_iwlwifi_dev_hcmd(trans->dev, cmd, cmd_size, &out_cmd->hdr);
 
        /* start timer if queue currently empty */
@@ -1405,9 +1421,11 @@ static int iwl_pcie_enqueue_hcmd(struct iwl_trans *trans,
        /*
         * wake up the NIC to make sure that the firmware will see the host
         * command - we will let the NIC sleep once all the host commands
-        * returned.
+        * returned. This needs to be done only on NICs that have
+        * apmg_wake_up_wa set.
         */
-       if (!trans_pcie->cmd_in_flight) {
+       if (trans->cfg->base_params->apmg_wake_up_wa &&
+           !trans_pcie->cmd_in_flight) {
                trans_pcie->cmd_in_flight = true;
                __iwl_trans_pcie_set_bit(trans, CSR_GP_CNTRL,
                                         CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ);
@@ -1661,7 +1679,7 @@ int iwl_trans_pcie_tx(struct iwl_trans *trans, struct sk_buff *skb,
        dma_addr_t tb0_phys, tb1_phys, scratch_phys;
        void *tb1_addr;
        u16 len, tb1_len, tb2_len;
-       u8 wait_write_ptr = 0;
+       bool wait_write_ptr;
        __le16 fc = hdr->frame_control;
        u8 hdr_len = ieee80211_hdrlen(fc);
        u16 wifi_seq;
@@ -1762,12 +1780,7 @@ int iwl_trans_pcie_tx(struct iwl_trans *trans, struct sk_buff *skb,
        trace_iwlwifi_dev_tx_data(trans->dev, skb,
                                  skb->data + hdr_len, tb2_len);
 
-       if (!ieee80211_has_morefrags(fc)) {
-               txq->need_update = 1;
-       } else {
-               wait_write_ptr = 1;
-               txq->need_update = 0;
-       }
+       wait_write_ptr = ieee80211_has_morefrags(fc);
 
        /* start timer if queue currently empty */
        if (txq->need_update && q->read_ptr == q->write_ptr &&
@@ -1776,21 +1789,18 @@ int iwl_trans_pcie_tx(struct iwl_trans *trans, struct sk_buff *skb,
 
        /* Tell device the write index *just past* this latest filled TFD */
        q->write_ptr = iwl_queue_inc_wrap(q->write_ptr, q->n_bd);
-       iwl_pcie_txq_inc_wr_ptr(trans, txq);
+       if (!wait_write_ptr)
+               iwl_pcie_txq_inc_wr_ptr(trans, txq);
 
        /*
         * At this point the frame is "transmitted" successfully
-        * and we will get a TX status notification eventually,
-        * regardless of the value of ret. "ret" only indicates
-        * whether or not we should update the write pointer.
+        * and we will get a TX status notification eventually.
         */
        if (iwl_queue_space(q) < q->high_mark) {
-               if (wait_write_ptr) {
-                       txq->need_update = 1;
+               if (wait_write_ptr)
                        iwl_pcie_txq_inc_wr_ptr(trans, txq);
-               } else {
+               else
                        iwl_stop_queue(trans, txq);
-               }
        }
        spin_unlock(&txq->lock);
        return 0;
index 9d7a52f5a4102abedd2dbebc03c26c3866da2a64..a312c653d1163fcc5c4ff394a54b0c7a96370d8f 100644 (file)
@@ -1676,7 +1676,9 @@ static int mac80211_hwsim_ampdu_action(struct ieee80211_hw *hw,
        return 0;
 }
 
-static void mac80211_hwsim_flush(struct ieee80211_hw *hw, u32 queues, bool drop)
+static void mac80211_hwsim_flush(struct ieee80211_hw *hw,
+                                struct ieee80211_vif *vif,
+                                u32 queues, bool drop)
 {
        /* Not implemented, queues only on kernel side */
 }
@@ -2056,6 +2058,7 @@ static int mac80211_hwsim_create_radio(int channels, const char *reg_alpha2,
                            WIPHY_FLAG_AP_UAPSD |
                            WIPHY_FLAG_HAS_CHANNEL_SWITCH;
        hw->wiphy->features |= NL80211_FEATURE_ACTIVE_MONITOR;
+       hw->wiphy->features |= NL80211_FEATURE_AP_MODE_CHAN_WIDTH_CHANGE;
 
        /* ask mac80211 to reserve space for magic */
        hw->vif_data_size = sizeof(struct hwsim_vif_priv);
index c92f27aa71ede1f049c101a0ba185f31d982aaa3..706831df1fa2a4183cb3c5ad849f1aa8df8dbb14 100644 (file)
@@ -212,8 +212,7 @@ int mwifiex_cmd_append_11ac_tlv(struct mwifiex_private *priv,
                                      sizeof(struct mwifiex_ie_types_header));
                        memcpy((u8 *)vht_op +
                                sizeof(struct mwifiex_ie_types_header),
-                              (u8 *)bss_desc->bcn_vht_oper +
-                              sizeof(struct ieee_types_header),
+                              (u8 *)bss_desc->bcn_vht_oper,
                               le16_to_cpu(vht_op->header.len));
 
                        /* negotiate the channel width and central freq
index d14ead8beca860dba6c984d26df095b104bb1375..2bd07d681c5e7fbedfacd35e9bcad038fcf56970 100644 (file)
@@ -345,8 +345,7 @@ mwifiex_cmd_append_11n_tlv(struct mwifiex_private *priv,
 
                        memcpy((u8 *) ht_info +
                               sizeof(struct mwifiex_ie_types_header),
-                              (u8 *) bss_desc->bcn_ht_oper +
-                              sizeof(struct ieee_types_header),
+                              (u8 *)bss_desc->bcn_ht_oper,
                               le16_to_cpu(ht_info->header.len));
 
                        if (!(sband->ht_cap.cap &
index 63211707f93955c851bfb96d71f12d5ef1f4a313..860dfe71cf965b0b7e2033d1eb5a07702f6ef57c 100644 (file)
@@ -160,6 +160,7 @@ mwifiex_11n_aggregate_pkt(struct mwifiex_private *priv,
        int pad = 0, ret;
        struct mwifiex_tx_param tx_param;
        struct txpd *ptx_pd = NULL;
+       struct timeval tv;
        int headroom = adapter->iface_type == MWIFIEX_USB ? 0 : INTF_HEADER_LEN;
 
        skb_src = skb_peek(&pra_list->skb_head);
@@ -184,6 +185,9 @@ mwifiex_11n_aggregate_pkt(struct mwifiex_private *priv,
        tx_info_aggr->bss_num = tx_info_src->bss_num;
        skb_aggr->priority = skb_src->priority;
 
+       do_gettimeofday(&tv);
+       skb_aggr->tstamp = timeval_to_ktime(tv);
+
        do {
                /* Check if AMSDU can accommodate this MSDU */
                if (skb_tailroom(skb_aggr) < (skb_src->len + LLC_SNAP_LEN))
@@ -236,18 +240,11 @@ mwifiex_11n_aggregate_pkt(struct mwifiex_private *priv,
                ret = adapter->if_ops.host_to_card(adapter, MWIFIEX_USB_EP_DATA,
                                                   skb_aggr, NULL);
        } else {
-               /*
-                * Padding per MSDU will affect the length of next
-                * packet and hence the exact length of next packet
-                * is uncertain here.
-                *
-                * Also, aggregation of transmission buffer, while
-                * downloading the data to the card, wont gain much
-                * on the AMSDU packets as the AMSDU packets utilizes
-                * the transmission buffer space to the maximum
-                * (adapter->tx_buf_size).
-                */
-               tx_param.next_pkt_len = 0;
+               if (skb_src)
+                       tx_param.next_pkt_len =
+                                       skb_src->len + sizeof(struct txpd);
+               else
+                       tx_param.next_pkt_len = 0;
 
                ret = adapter->if_ops.host_to_card(adapter, MWIFIEX_TYPE_DATA,
                                                   skb_aggr, &tx_param);
index b9242c3dca435ee9a4d5123fd57ad0733a96a24d..3b55ce5690a54e226c5482f523a3c80d1e95d7bf 100644 (file)
@@ -200,4 +200,11 @@ getlog
 
        cat getlog
 
+fw_dump
+       This command is used to dump firmware memory into files.
+       Separate file will be created for each memory segment.
+       Usage:
+
+       cat fw_dump
+
 ===============================================================================
index 1062c918a7bffb19cf93c1aba0daa4490856ba65..8dee6c86f4f1dc91e65978b6f7443ac9f00c2118 100644 (file)
@@ -955,8 +955,6 @@ mwifiex_cmd_timeout_func(unsigned long function_context)
                        adapter->cmd_wait_q.status = -ETIMEDOUT;
                        wake_up_interruptible(&adapter->cmd_wait_q.wait);
                        mwifiex_cancel_pending_ioctl(adapter);
-                       /* reset cmd_sent flag to unblock new commands */
-                       adapter->cmd_sent = false;
                }
        }
        if (adapter->hw_status == MWIFIEX_HW_STATUS_INITIALIZING)
index b8a49aad12fd662434ce2a29aaa8edecfb52ba0b..7b419bbcd5444f5c5abdf40ffb2368087b77e89a 100644 (file)
@@ -256,6 +256,29 @@ free_and_exit:
        return ret;
 }
 
+/*
+ * Proc firmware dump read handler.
+ *
+ * This function is called when the 'fw_dump' file is opened for
+ * reading.
+ * This function dumps firmware memory in different files
+ * (ex. DTCM, ITCM, SQRAM etc.) based on the the segments for
+ * debugging.
+ */
+static ssize_t
+mwifiex_fw_dump_read(struct file *file, char __user *ubuf,
+                    size_t count, loff_t *ppos)
+{
+       struct mwifiex_private *priv = file->private_data;
+
+       if (!priv->adapter->if_ops.fw_dump)
+               return -EIO;
+
+       priv->adapter->if_ops.fw_dump(priv->adapter);
+
+       return 0;
+}
+
 /*
  * Proc getlog file read handler.
  *
@@ -699,6 +722,7 @@ static const struct file_operations mwifiex_dfs_##name##_fops = {       \
 MWIFIEX_DFS_FILE_READ_OPS(info);
 MWIFIEX_DFS_FILE_READ_OPS(debug);
 MWIFIEX_DFS_FILE_READ_OPS(getlog);
+MWIFIEX_DFS_FILE_READ_OPS(fw_dump);
 MWIFIEX_DFS_FILE_OPS(regrdwr);
 MWIFIEX_DFS_FILE_OPS(rdeeprom);
 
@@ -722,6 +746,7 @@ mwifiex_dev_debugfs_init(struct mwifiex_private *priv)
        MWIFIEX_DFS_ADD_FILE(getlog);
        MWIFIEX_DFS_ADD_FILE(regrdwr);
        MWIFIEX_DFS_ADD_FILE(rdeeprom);
+       MWIFIEX_DFS_ADD_FILE(fw_dump);
 }
 
 /*
index e7b3e16e5d34f1f8703ec2f6d4e21e9388c49edd..38da6ff6f41623618efa22add335ffef1fa46828 100644 (file)
 #define MWIFIEX_MAX_TX_BASTREAM_SUPPORTED      2
 #define MWIFIEX_MAX_RX_BASTREAM_SUPPORTED      16
 
-#define MWIFIEX_STA_AMPDU_DEF_TXWINSIZE        16
-#define MWIFIEX_STA_AMPDU_DEF_RXWINSIZE        32
+#define MWIFIEX_STA_AMPDU_DEF_TXWINSIZE        64
+#define MWIFIEX_STA_AMPDU_DEF_RXWINSIZE        64
 #define MWIFIEX_UAP_AMPDU_DEF_TXWINSIZE        32
 #define MWIFIEX_UAP_AMPDU_DEF_RXWINSIZE        16
-#define MWIFIEX_11AC_STA_AMPDU_DEF_TXWINSIZE   32
-#define MWIFIEX_11AC_STA_AMPDU_DEF_RXWINSIZE   48
+#define MWIFIEX_11AC_STA_AMPDU_DEF_TXWINSIZE   64
+#define MWIFIEX_11AC_STA_AMPDU_DEF_RXWINSIZE   64
 #define MWIFIEX_11AC_UAP_AMPDU_DEF_TXWINSIZE   48
 #define MWIFIEX_11AC_UAP_AMPDU_DEF_RXWINSIZE   32
 
index ee494db5406097c35a0f22b365e2ad2e1ac674f8..1b576722671d5e6f228363c36c94c4ef47867980 100644 (file)
@@ -303,7 +303,7 @@ struct mwifiex_ds_ant_cfg {
        u32 rx_ant;
 };
 
-#define MWIFIEX_NUM_OF_CMD_BUFFER      20
+#define MWIFIEX_NUM_OF_CMD_BUFFER      50
 #define MWIFIEX_SIZE_OF_CMD_BUFFER     2048
 
 enum {
index 9c771b3e99186ffe838f771b217216ebc5a43454..cbabc12fbda390d063218375eb2b4cadc3911b8f 100644 (file)
@@ -521,7 +521,6 @@ done:
                release_firmware(adapter->firmware);
                adapter->firmware = NULL;
        }
-       complete(&adapter->fw_load);
        if (init_failed)
                mwifiex_free_adapter(adapter);
        up(sem);
@@ -535,7 +534,6 @@ static int mwifiex_init_hw_fw(struct mwifiex_adapter *adapter)
 {
        int ret;
 
-       init_completion(&adapter->fw_load);
        ret = request_firmware_nowait(THIS_MODULE, 1, adapter->fw_name,
                                      adapter->dev, GFP_KERNEL, adapter,
                                      mwifiex_fw_dpc);
index d53e1e8c9467a62663c4d28df86e623237cdc45f..34181192a666b71a759cfd65c6e3c30c2eb1ca38 100644 (file)
@@ -672,6 +672,7 @@ struct mwifiex_if_ops {
        int (*init_fw_port) (struct mwifiex_adapter *);
        int (*dnld_fw) (struct mwifiex_adapter *, struct mwifiex_fw_image *);
        void (*card_reset) (struct mwifiex_adapter *);
+       void (*fw_dump)(struct mwifiex_adapter *);
        int (*clean_pcie_ring) (struct mwifiex_adapter *adapter);
 };
 
@@ -787,7 +788,6 @@ struct mwifiex_adapter {
        struct mwifiex_wait_queue cmd_wait_q;
        u8 scan_wait_q_woken;
        spinlock_t queue_lock;          /* lock for tx queues */
-       struct completion fw_load;
        u8 country_code[IEEE80211_COUNTRY_STRING_LEN];
        u16 max_mgmt_ie_index;
        u8 scan_delay_cnt;
index a7e8b96b2d9024de8c34e5e04b317c66d2e22820..c2cfeec466d87b40811515bec11be698943a4f55 100644 (file)
@@ -221,9 +221,6 @@ static void mwifiex_pcie_remove(struct pci_dev *pdev)
        if (!adapter || !adapter->priv_num)
                return;
 
-       /* In case driver is removed when asynchronous FW load is in progress */
-       wait_for_completion(&adapter->fw_load);
-
        if (user_rmmod) {
 #ifdef CONFIG_PM_SLEEP
                if (adapter->is_suspended)
index 7b3af3d29ded478ad658eed5a3836403d6dd7542..d75f4ebd4bdce7767c797fe28c9fd3f9eb4fa2a9 100644 (file)
@@ -29,9 +29,6 @@
 #define MWIFIEX_MAX_CHANNELS_PER_SPECIFIC_SCAN   14
 
 #define MWIFIEX_DEF_CHANNELS_PER_SCAN_CMD      4
-#define MWIFIEX_LIMIT_1_CHANNEL_PER_SCAN_CMD   15
-#define MWIFIEX_LIMIT_2_CHANNELS_PER_SCAN_CMD  27
-#define MWIFIEX_LIMIT_3_CHANNELS_PER_SCAN_CMD  35
 
 /* Memory needed to store a max sized Channel List TLV for a firmware scan */
 #define CHAN_TLV_MAX_SIZE  (sizeof(struct mwifiex_ie_types_header)         \
@@ -1055,20 +1052,10 @@ mwifiex_config_scan(struct mwifiex_private *priv,
 
        /*
         * In associated state we will reduce the number of channels scanned per
-        * scan command to avoid any traffic delay/loss. This number is decided
-        * based on total number of channels to be scanned due to constraints
-        * of command buffers.
+        * scan command to 1 to avoid any traffic delay/loss.
         */
-       if (priv->media_connected) {
-               if (chan_num < MWIFIEX_LIMIT_1_CHANNEL_PER_SCAN_CMD)
+       if (priv->media_connected)
                        *max_chan_per_scan = 1;
-               else if (chan_num < MWIFIEX_LIMIT_2_CHANNELS_PER_SCAN_CMD)
-                       *max_chan_per_scan = 2;
-               else if (chan_num < MWIFIEX_LIMIT_3_CHANNELS_PER_SCAN_CMD)
-                       *max_chan_per_scan = 3;
-               else
-                       *max_chan_per_scan = 4;
-       }
 }
 
 /*
@@ -1353,23 +1340,17 @@ int mwifiex_update_bss_desc_with_ie(struct mwifiex_adapter *adapter,
                                              bss_entry->beacon_buf);
                        break;
                case WLAN_EID_BSS_COEX_2040:
-                       bss_entry->bcn_bss_co_2040 = current_ptr +
-                               sizeof(struct ieee_types_header);
-                       bss_entry->bss_co_2040_offset = (u16) (current_ptr +
-                                       sizeof(struct ieee_types_header) -
-                                               bss_entry->beacon_buf);
+                       bss_entry->bcn_bss_co_2040 = current_ptr;
+                       bss_entry->bss_co_2040_offset =
+                               (u16) (current_ptr - bss_entry->beacon_buf);
                        break;
                case WLAN_EID_EXT_CAPABILITY:
-                       bss_entry->bcn_ext_cap = current_ptr +
-                               sizeof(struct ieee_types_header);
-                       bss_entry->ext_cap_offset = (u16) (current_ptr +
-                                       sizeof(struct ieee_types_header) -
-                                       bss_entry->beacon_buf);
+                       bss_entry->bcn_ext_cap = current_ptr;
+                       bss_entry->ext_cap_offset =
+                               (u16) (current_ptr - bss_entry->beacon_buf);
                        break;
                case WLAN_EID_OPMODE_NOTIF:
-                       bss_entry->oper_mode =
-                               (void *)(current_ptr +
-                                        sizeof(struct ieee_types_header));
+                       bss_entry->oper_mode = (void *)current_ptr;
                        bss_entry->oper_mode_offset =
                                        (u16)((u8 *)bss_entry->oper_mode -
                                              bss_entry->beacon_buf);
index d206f04d499498d6d9c7ba92a80685588ae7bc96..a1773d3cb49f4805503ba8f9c104f685ae7114b4 100644 (file)
@@ -85,6 +85,8 @@ mwifiex_sdio_probe(struct sdio_func *func, const struct sdio_device_id *id)
                card->supports_sdio_new_mode = data->supports_sdio_new_mode;
                card->has_control_mask = data->has_control_mask;
                card->tx_buf_size = data->tx_buf_size;
+               card->mp_tx_agg_buf_size = data->mp_tx_agg_buf_size;
+               card->mp_rx_agg_buf_size = data->mp_rx_agg_buf_size;
        }
 
        sdio_claim_host(func);
@@ -177,9 +179,6 @@ mwifiex_sdio_remove(struct sdio_func *func)
        if (!adapter || !adapter->priv_num)
                return;
 
-       /* In case driver is removed when asynchronous FW load is in progress */
-       wait_for_completion(&adapter->fw_load);
-
        if (user_rmmod) {
                if (adapter->is_suspended)
                        mwifiex_sdio_resume(adapter->dev);
@@ -1842,8 +1841,8 @@ static int mwifiex_init_sdio(struct mwifiex_adapter *adapter)
        card->mpa_rx.len_arr = kzalloc(sizeof(*card->mpa_rx.len_arr) *
                                       card->mp_agg_pkt_limit, GFP_KERNEL);
        ret = mwifiex_alloc_sdio_mpa_buffers(adapter,
-                                            SDIO_MP_TX_AGGR_DEF_BUF_SIZE,
-                                            SDIO_MP_RX_AGGR_DEF_BUF_SIZE);
+                                            card->mp_tx_agg_buf_size,
+                                            card->mp_rx_agg_buf_size);
        if (ret) {
                dev_err(adapter->dev, "failed to alloc sdio mp-a buffers\n");
                kfree(card->mp_regs);
index c71201b2e2a333c20f926bacb4842395230c6926..6eea30b43ed714f3bd81f9453a5008001a11b33d 100644 (file)
 #define UP_LD_CMD_PORT_HOST_INT_STATUS (0x40U)
 #define DN_LD_CMD_PORT_HOST_INT_STATUS (0x80U)
 
-#define SDIO_MP_TX_AGGR_DEF_BUF_SIZE        (8192)     /* 8K */
-
-/* Multi port RX aggregation buffer size */
-#define SDIO_MP_RX_AGGR_DEF_BUF_SIZE        (16384)    /* 16K */
+#define MWIFIEX_MP_AGGR_BUF_SIZE_16K   (16384)
+#define MWIFIEX_MP_AGGR_BUF_SIZE_32K   (32768)
 
 /* Misc. Config Register : Auto Re-enable interrupts */
 #define AUTO_RE_ENABLE_INT              BIT(4)
@@ -234,6 +232,8 @@ struct sdio_mmc_card {
        bool supports_sdio_new_mode;
        bool has_control_mask;
        u16 tx_buf_size;
+       u32 mp_tx_agg_buf_size;
+       u32 mp_rx_agg_buf_size;
 
        u32 mp_rd_bitmap;
        u32 mp_wr_bitmap;
@@ -258,6 +258,8 @@ struct mwifiex_sdio_device {
        bool supports_sdio_new_mode;
        bool has_control_mask;
        u16 tx_buf_size;
+       u32 mp_tx_agg_buf_size;
+       u32 mp_rx_agg_buf_size;
 };
 
 static const struct mwifiex_sdio_card_reg mwifiex_reg_sd87xx = {
@@ -315,6 +317,8 @@ static const struct mwifiex_sdio_device mwifiex_sdio_sd8786 = {
        .supports_sdio_new_mode = false,
        .has_control_mask = true,
        .tx_buf_size = MWIFIEX_TX_DATA_BUF_SIZE_2K,
+       .mp_tx_agg_buf_size = MWIFIEX_MP_AGGR_BUF_SIZE_16K,
+       .mp_rx_agg_buf_size = MWIFIEX_MP_AGGR_BUF_SIZE_16K,
 };
 
 static const struct mwifiex_sdio_device mwifiex_sdio_sd8787 = {
@@ -325,6 +329,8 @@ static const struct mwifiex_sdio_device mwifiex_sdio_sd8787 = {
        .supports_sdio_new_mode = false,
        .has_control_mask = true,
        .tx_buf_size = MWIFIEX_TX_DATA_BUF_SIZE_2K,
+       .mp_tx_agg_buf_size = MWIFIEX_MP_AGGR_BUF_SIZE_16K,
+       .mp_rx_agg_buf_size = MWIFIEX_MP_AGGR_BUF_SIZE_16K,
 };
 
 static const struct mwifiex_sdio_device mwifiex_sdio_sd8797 = {
@@ -335,6 +341,8 @@ static const struct mwifiex_sdio_device mwifiex_sdio_sd8797 = {
        .supports_sdio_new_mode = false,
        .has_control_mask = true,
        .tx_buf_size = MWIFIEX_TX_DATA_BUF_SIZE_2K,
+       .mp_tx_agg_buf_size = MWIFIEX_MP_AGGR_BUF_SIZE_16K,
+       .mp_rx_agg_buf_size = MWIFIEX_MP_AGGR_BUF_SIZE_16K,
 };
 
 static const struct mwifiex_sdio_device mwifiex_sdio_sd8897 = {
@@ -345,6 +353,8 @@ static const struct mwifiex_sdio_device mwifiex_sdio_sd8897 = {
        .supports_sdio_new_mode = true,
        .has_control_mask = false,
        .tx_buf_size = MWIFIEX_TX_DATA_BUF_SIZE_4K,
+       .mp_tx_agg_buf_size = MWIFIEX_MP_AGGR_BUF_SIZE_32K,
+       .mp_rx_agg_buf_size = MWIFIEX_MP_AGGR_BUF_SIZE_32K,
 };
 
 /*
index 9be6544bddedf9371e79fd468ebb0aa2dbcabe54..32643555dd2a32a302d1301427e463d877c8260a 100644 (file)
@@ -175,17 +175,19 @@ mwifiex_set_ht_params(struct mwifiex_private *priv,
                switch (GET_RXSTBC(cap_info)) {
                case MWIFIEX_RX_STBC1:
                        /* HT_CAP 1X1 mode */
-                       memset(&bss_cfg->ht_cap.mcs, 0xff, 1);
+                       bss_cfg->ht_cap.mcs.rx_mask[0] = 0xff;
                        break;
                case MWIFIEX_RX_STBC12: /* fall through */
                case MWIFIEX_RX_STBC123:
                        /* HT_CAP 2X2 mode */
-                       memset(&bss_cfg->ht_cap.mcs, 0xff, 2);
+                       bss_cfg->ht_cap.mcs.rx_mask[0] = 0xff;
+                       bss_cfg->ht_cap.mcs.rx_mask[1] = 0xff;
                        break;
                default:
                        dev_warn(priv->adapter->dev,
                                 "Unsupported RX-STBC, default to 2x2\n");
-                       memset(&bss_cfg->ht_cap.mcs, 0xff, 2);
+                       bss_cfg->ht_cap.mcs.rx_mask[0] = 0xff;
+                       bss_cfg->ht_cap.mcs.rx_mask[1] = 0xff;
                        break;
                }
                priv->ap_11n_enabled = 1;
index edbe4aff00d85b569534372ea34e7e017552b234..a8ce8130cfaeeda08a2a08f7b693540fe79d9f85 100644 (file)
@@ -22,9 +22,9 @@
 
 #define USB_VERSION    "1.0"
 
+static u8 user_rmmod;
 static struct mwifiex_if_ops usb_ops;
 static struct semaphore add_remove_card_sem;
-static struct usb_card_rec *usb_card;
 
 static struct usb_device_id mwifiex_usb_table[] = {
        /* 8797 */
@@ -532,28 +532,38 @@ static int mwifiex_usb_resume(struct usb_interface *intf)
 static void mwifiex_usb_disconnect(struct usb_interface *intf)
 {
        struct usb_card_rec *card = usb_get_intfdata(intf);
+       struct mwifiex_adapter *adapter;
 
-       if (!card) {
-               pr_err("%s: card is NULL\n", __func__);
+       if (!card || !card->adapter) {
+               pr_err("%s: card or card->adapter is NULL\n", __func__);
                return;
        }
 
-       mwifiex_usb_free(card);
+       adapter = card->adapter;
+       if (!adapter->priv_num)
+               return;
 
-       if (card->adapter) {
-               struct mwifiex_adapter *adapter = card->adapter;
+       if (user_rmmod) {
+#ifdef CONFIG_PM
+               if (adapter->is_suspended)
+                       mwifiex_usb_resume(intf);
+#endif
 
-               if (!adapter->priv_num)
-                       return;
+               mwifiex_deauthenticate_all(adapter);
 
-               dev_dbg(adapter->dev, "%s: removing card\n", __func__);
-               mwifiex_remove_card(adapter, &add_remove_card_sem);
+               mwifiex_init_shutdown_fw(mwifiex_get_priv(adapter,
+                                                         MWIFIEX_BSS_ROLE_ANY),
+                                        MWIFIEX_FUNC_SHUTDOWN);
        }
 
+       mwifiex_usb_free(card);
+
+       dev_dbg(adapter->dev, "%s: removing card\n", __func__);
+       mwifiex_remove_card(adapter, &add_remove_card_sem);
+
        usb_set_intfdata(intf, NULL);
        usb_put_dev(interface_to_usbdev(intf));
        kfree(card);
-       usb_card = NULL;
 
        return;
 }
@@ -565,6 +575,7 @@ static struct usb_driver mwifiex_usb_driver = {
        .id_table = mwifiex_usb_table,
        .suspend = mwifiex_usb_suspend,
        .resume = mwifiex_usb_resume,
+       .soft_unbind = 1,
 };
 
 static int mwifiex_usb_tx_init(struct mwifiex_adapter *adapter)
@@ -762,7 +773,6 @@ static int mwifiex_register_dev(struct mwifiex_adapter *adapter)
 
        card->adapter = adapter;
        adapter->dev = &card->udev->dev;
-       usb_card = card;
 
        switch (le16_to_cpu(card->udev->descriptor.idProduct)) {
        case USB8897_PID_1:
@@ -1025,25 +1035,8 @@ static void mwifiex_usb_cleanup_module(void)
        if (!down_interruptible(&add_remove_card_sem))
                up(&add_remove_card_sem);
 
-       if (usb_card && usb_card->adapter) {
-               struct mwifiex_adapter *adapter = usb_card->adapter;
-
-               /* In case driver is removed when asynchronous FW downloading is
-                * in progress
-                */
-               wait_for_completion(&adapter->fw_load);
-
-#ifdef CONFIG_PM
-               if (adapter->is_suspended)
-                       mwifiex_usb_resume(usb_card->intf);
-#endif
-
-               mwifiex_deauthenticate_all(adapter);
-
-               mwifiex_init_shutdown_fw(mwifiex_get_priv(adapter,
-                                                         MWIFIEX_BSS_ROLE_ANY),
-                                        MWIFIEX_FUNC_SHUTDOWN);
-       }
+       /* set the flag as user is removing this module */
+       user_rmmod = 1;
 
        usb_deregister(&mwifiex_usb_driver);
 }
index 0a7cc742aed71e0fd31267305be7a26ec71b8b52..94b6c74ba727eed15bc3affaf435444fcec53feb 100644 (file)
@@ -426,15 +426,6 @@ mwifiex_wmm_init(struct mwifiex_adapter *adapter)
                                                        priv->tos_to_tid_inv[i];
                }
 
-               priv->aggr_prio_tbl[6].amsdu
-                                       = priv->aggr_prio_tbl[6].ampdu_ap
-                                       = priv->aggr_prio_tbl[6].ampdu_user
-                                       = BA_STREAM_NOT_ALLOWED;
-
-               priv->aggr_prio_tbl[7].amsdu = priv->aggr_prio_tbl[7].ampdu_ap
-                                       = priv->aggr_prio_tbl[7].ampdu_user
-                                       = BA_STREAM_NOT_ALLOWED;
-
                mwifiex_set_ba_params(priv);
                mwifiex_reset_11n_rx_seq_num(priv);
 
index eede90b63f847934a8a0bc45e63695696d88a82d..7be3a4839640c6eda6b8254fd7b81bbe5d01fd33 100644 (file)
@@ -669,7 +669,8 @@ static unsigned int p54_flush_count(struct p54_common *priv)
        return total;
 }
 
-static void p54_flush(struct ieee80211_hw *dev, u32 queues, bool drop)
+static void p54_flush(struct ieee80211_hw *dev, struct ieee80211_vif *vif,
+                     u32 queues, bool drop)
 {
        struct p54_common *priv = dev->priv;
        unsigned int total, i;
index cbf0a589d32af08c392271c7bbc3afa50f8e9c84..8330fa33e50b1e2f933f813ee187c407184780ae 100644 (file)
@@ -343,7 +343,7 @@ static void ray_detach(struct pcmcia_device *link)
        ray_release(link);
 
        local = netdev_priv(dev);
-       del_timer(&local->timer);
+       del_timer_sync(&local->timer);
 
        if (link->priv) {
                unregister_netdev(dev);
index 1b28cda6ca88124deff6c112a060f5af6692cefa..2eefbf159bc0d0abdcead9ff1caf6b867d95793f 100644 (file)
@@ -1083,7 +1083,7 @@ void rsi_inform_bss_status(struct rsi_common *common,
 {
        if (status) {
                rsi_hal_send_sta_notify_frame(common,
-                                             NL80211_IFTYPE_STATION,
+                                             RSI_IFTYPE_STATION,
                                              STA_CONNECTED,
                                              bssid,
                                              qos_enable,
@@ -1092,7 +1092,7 @@ void rsi_inform_bss_status(struct rsi_common *common,
                        rsi_send_auto_rate_request(common);
        } else {
                rsi_hal_send_sta_notify_frame(common,
-                                             NL80211_IFTYPE_STATION,
+                                             RSI_IFTYPE_STATION,
                                              STA_DISCONNECTED,
                                              bssid,
                                              qos_enable,
index ac67c4ad63c2d3177e3e70386ff27538b69b536f..225215a3b8bb484d76b47ed853afb3aeb6eb2130 100644 (file)
@@ -73,6 +73,7 @@
 #define RX_BA_INDICATION                1
 #define RSI_TBL_SZ                      40
 #define MAX_RETRIES                     8
+#define RSI_IFTYPE_STATION              0
 
 #define STD_RATE_MCS7                   0x07
 #define STD_RATE_MCS6                   0x06
index 41d4a8167dc32f368a8fdf061bea4fe9944fd0f1..c17fcf272728cb06ae25e95787003f6f59f52dba 100644 (file)
@@ -1005,10 +1005,9 @@ void rt2800_write_beacon(struct queue_entry *entry, struct txentry_desc *txdesc)
                                   entry->skb->len + padding_len);
 
        /*
-        * Enable beaconing again.
+        * Restore beaconing state.
         */
-       rt2x00_set_field32(&reg, BCN_TIME_CFG_BEACON_GEN, 1);
-       rt2800_register_write(rt2x00dev, BCN_TIME_CFG, reg);
+       rt2800_register_write(rt2x00dev, BCN_TIME_CFG, orig_reg);
 
        /*
         * Clean up beacon skb.
@@ -1039,13 +1038,14 @@ static inline void rt2800_clear_beacon_register(struct rt2x00_dev *rt2x00dev,
 void rt2800_clear_beacon(struct queue_entry *entry)
 {
        struct rt2x00_dev *rt2x00dev = entry->queue->rt2x00dev;
-       u32 reg;
+       u32 orig_reg, reg;
 
        /*
         * Disable beaconing while we are reloading the beacon data,
         * otherwise we might be sending out invalid data.
         */
-       rt2800_register_read(rt2x00dev, BCN_TIME_CFG, &reg);
+       rt2800_register_read(rt2x00dev, BCN_TIME_CFG, &orig_reg);
+       reg = orig_reg;
        rt2x00_set_field32(&reg, BCN_TIME_CFG_BEACON_GEN, 0);
        rt2800_register_write(rt2x00dev, BCN_TIME_CFG, reg);
 
@@ -1055,10 +1055,9 @@ void rt2800_clear_beacon(struct queue_entry *entry)
        rt2800_clear_beacon_register(rt2x00dev, entry->entry_idx);
 
        /*
-        * Enabled beaconing again.
+        * Restore beaconing state.
         */
-       rt2x00_set_field32(&reg, BCN_TIME_CFG_BEACON_GEN, 1);
-       rt2800_register_write(rt2x00dev, BCN_TIME_CFG, reg);
+       rt2800_register_write(rt2x00dev, BCN_TIME_CFG, orig_reg);
 }
 EXPORT_SYMBOL_GPL(rt2800_clear_beacon);
 
index e3b885d8f7dbfddda2f4ae71161b24edeefdc02c..010b76505243ed1cf15d1f176033cabd5ac23f3d 100644 (file)
@@ -1448,7 +1448,8 @@ int rt2x00mac_conf_tx(struct ieee80211_hw *hw,
                      struct ieee80211_vif *vif, u16 queue,
                      const struct ieee80211_tx_queue_params *params);
 void rt2x00mac_rfkill_poll(struct ieee80211_hw *hw);
-void rt2x00mac_flush(struct ieee80211_hw *hw, u32 queues, bool drop);
+void rt2x00mac_flush(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
+                    u32 queues, bool drop);
 int rt2x00mac_set_antenna(struct ieee80211_hw *hw, u32 tx_ant, u32 rx_ant);
 int rt2x00mac_get_antenna(struct ieee80211_hw *hw, u32 *tx_ant, u32 *rx_ant);
 void rt2x00mac_get_ringparam(struct ieee80211_hw *hw,
index ddeb5a709aa36d6375e58597104f4cc3cb632d12..212ac4842c1628a0d141104188626d55c616c487 100644 (file)
@@ -620,21 +620,19 @@ void rt2x00mac_bss_info_changed(struct ieee80211_hw *hw,
                rt2x00lib_config_intf(rt2x00dev, intf, vif->type, NULL,
                                      bss_conf->bssid);
 
-       /*
-        * Update the beacon. This is only required on USB devices. PCI
-        * devices fetch beacons periodically.
-        */
-       if (changes & BSS_CHANGED_BEACON && rt2x00_is_usb(rt2x00dev))
-               rt2x00queue_update_beacon(rt2x00dev, vif);
-
        /*
         * Start/stop beaconing.
         */
        if (changes & BSS_CHANGED_BEACON_ENABLED) {
                if (!bss_conf->enable_beacon && intf->enable_beacon) {
-                       rt2x00queue_clear_beacon(rt2x00dev, vif);
                        rt2x00dev->intf_beaconing--;
                        intf->enable_beacon = false;
+                       /*
+                        * Clear beacon in the H/W for this vif. This is needed
+                        * to disable beaconing on this particular interface
+                        * and keep it running on other interfaces.
+                        */
+                       rt2x00queue_clear_beacon(rt2x00dev, vif);
 
                        if (rt2x00dev->intf_beaconing == 0) {
                                /*
@@ -645,11 +643,15 @@ void rt2x00mac_bss_info_changed(struct ieee80211_hw *hw,
                                rt2x00queue_stop_queue(rt2x00dev->bcn);
                                mutex_unlock(&intf->beacon_skb_mutex);
                        }
-
-
                } else if (bss_conf->enable_beacon && !intf->enable_beacon) {
                        rt2x00dev->intf_beaconing++;
                        intf->enable_beacon = true;
+                       /*
+                        * Upload beacon to the H/W. This is only required on
+                        * USB devices. PCI devices fetch beacons periodically.
+                        */
+                       if (rt2x00_is_usb(rt2x00dev))
+                               rt2x00queue_update_beacon(rt2x00dev, vif);
 
                        if (rt2x00dev->intf_beaconing == 1) {
                                /*
@@ -747,7 +749,8 @@ void rt2x00mac_rfkill_poll(struct ieee80211_hw *hw)
 }
 EXPORT_SYMBOL_GPL(rt2x00mac_rfkill_poll);
 
-void rt2x00mac_flush(struct ieee80211_hw *hw, u32 queues, bool drop)
+void rt2x00mac_flush(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
+                    u32 queues, bool drop)
 {
        struct rt2x00_dev *rt2x00dev = hw->priv;
        struct data_queue *queue;
index 24402984ee5749f272609d82907cda4a68f750f6..9048a9cbe52cb929cfbd60797a00baa40c5d6583 100644 (file)
@@ -2031,13 +2031,14 @@ static void rt61pci_write_beacon(struct queue_entry *entry,
 static void rt61pci_clear_beacon(struct queue_entry *entry)
 {
        struct rt2x00_dev *rt2x00dev = entry->queue->rt2x00dev;
-       u32 reg;
+       u32 orig_reg, reg;
 
        /*
         * Disable beaconing while we are reloading the beacon data,
         * otherwise we might be sending out invalid data.
         */
-       rt2x00mmio_register_read(rt2x00dev, TXRX_CSR9, &reg);
+       rt2x00mmio_register_read(rt2x00dev, TXRX_CSR9, &orig_reg);
+       reg = orig_reg;
        rt2x00_set_field32(&reg, TXRX_CSR9_BEACON_GEN, 0);
        rt2x00mmio_register_write(rt2x00dev, TXRX_CSR9, reg);
 
@@ -2048,10 +2049,9 @@ static void rt61pci_clear_beacon(struct queue_entry *entry)
                                  HW_BEACON_OFFSET(entry->entry_idx), 0);
 
        /*
-        * Enable beaconing again.
+        * Restore global beaconing state.
         */
-       rt2x00_set_field32(&reg, TXRX_CSR9_BEACON_GEN, 1);
-       rt2x00mmio_register_write(rt2x00dev, TXRX_CSR9, reg);
+       rt2x00mmio_register_write(rt2x00dev, TXRX_CSR9, orig_reg);
 }
 
 /*
index a140170b1eb3e63625ecde7b4cc43ec6bf1b87b1..95724ff9c7268700628866f433b66e82a8a9f7c4 100644 (file)
@@ -1597,13 +1597,14 @@ static void rt73usb_clear_beacon(struct queue_entry *entry)
 {
        struct rt2x00_dev *rt2x00dev = entry->queue->rt2x00dev;
        unsigned int beacon_base;
-       u32 reg;
+       u32 orig_reg, reg;
 
        /*
         * Disable beaconing while we are reloading the beacon data,
         * otherwise we might be sending out invalid data.
         */
-       rt2x00usb_register_read(rt2x00dev, TXRX_CSR9, &reg);
+       rt2x00usb_register_read(rt2x00dev, TXRX_CSR9, &orig_reg);
+       reg = orig_reg;
        rt2x00_set_field32(&reg, TXRX_CSR9_BEACON_GEN, 0);
        rt2x00usb_register_write(rt2x00dev, TXRX_CSR9, reg);
 
@@ -1614,10 +1615,9 @@ static void rt73usb_clear_beacon(struct queue_entry *entry)
        rt2x00usb_register_write(rt2x00dev, beacon_base, 0);
 
        /*
-        * Enable beaconing again.
+        * Restore beaconing state.
         */
-       rt2x00_set_field32(&reg, TXRX_CSR9_BEACON_GEN, 1);
-       rt2x00usb_register_write(rt2x00dev, TXRX_CSR9, reg);
+       rt2x00usb_register_write(rt2x00dev, TXRX_CSR9, orig_reg);
 }
 
 static int rt73usb_get_tx_data_len(struct queue_entry *entry)
index 08b056db4a3b795282d1d9e878ab400d7090bcf2..21005bd8b43c973da6ebd24556da848032727bcf 100644 (file)
@@ -1,5 +1,5 @@
-rtl8180-objs           := dev.o rtl8225.o sa2400.o max2820.o grf5101.o rtl8225se.o
+rtl818x_pci-objs       := dev.o rtl8225.o sa2400.o max2820.o grf5101.o rtl8225se.o
 
-obj-$(CONFIG_RTL8180)  += rtl8180.o
+obj-$(CONFIG_RTL8180)  += rtl818x_pci.o
 
 ccflags-y += -Idrivers/net/wireless/rtl818x
index 98d8256f037788a4d9af76c02a4e939758a08e0e..50d69b13f9848f2a4913e2452d98224284bbf7c0 100644 (file)
@@ -683,9 +683,8 @@ static void rtl8180_int_enable(struct ieee80211_hw *dev)
        struct rtl8180_priv *priv = dev->priv;
 
        if (priv->chip_family == RTL818X_CHIP_FAMILY_RTL8187SE) {
-               rtl818x_iowrite32(priv, &priv->map->IMR, IMR_TMGDOK |
-                         IMR_TBDER | IMR_THPDER |
-                         IMR_THPDER | IMR_THPDOK |
+               rtl818x_iowrite32(priv, &priv->map->IMR,
+                         IMR_TBDER | IMR_TBDOK |
                          IMR_TVODER | IMR_TVODOK |
                          IMR_TVIDER | IMR_TVIDOK |
                          IMR_TBEDER | IMR_TBEDOK |
@@ -911,7 +910,10 @@ static int rtl8180_init_hw(struct ieee80211_hw *dev)
                reg32 &= 0x00ffff00;
                reg32 |= 0xb8000054;
                rtl818x_iowrite32(priv, &priv->map->RF_PARA, reg32);
-       }
+       } else
+               /* stop unused queus (no dma alloc) */
+               rtl818x_iowrite8(priv, &priv->map->TX_DMA_POLLING,
+                           (1<<1) | (1<<2));
 
        priv->rf->init(dev);
 
index 45ea4e1c4abe157ad952be2d8a5022efc1e1715a..7abef95d278bc61336d2829f40520afc1af81a1a 100644 (file)
@@ -334,9 +334,9 @@ struct rtl818x_csr {
  * I don't like to introduce a ton of "reserved"..
  * They are for RTL8187SE
  */
-#define REG_ADDR1(addr)        ((u8 __iomem *)priv->map + addr)
-#define REG_ADDR2(addr)        ((__le16 __iomem *)priv->map + (addr >> 1))
-#define REG_ADDR4(addr)        ((__le32 __iomem *)priv->map + (addr >> 2))
+#define REG_ADDR1(addr)        ((u8 __iomem *)priv->map + (addr))
+#define REG_ADDR2(addr)        ((__le16 __iomem *)priv->map + ((addr) >> 1))
+#define REG_ADDR4(addr)        ((__le32 __iomem *)priv->map + ((addr) >> 2))
 
 #define FEMR_SE                REG_ADDR2(0x1D4)
 #define ARFR           REG_ADDR2(0x1E0)
index 4ec424f26672028550ab8b19b944d451766ee08c..b1ed6d0796f67e187fb928423edda6977c91f863 100644 (file)
@@ -1387,7 +1387,8 @@ static void rtl_op_rfkill_poll(struct ieee80211_hw *hw)
  * before switch channel or power save, or tx buffer packet
  * maybe send after offchannel or rf sleep, this may cause
  * dis-association by AP */
-static void rtl_op_flush(struct ieee80211_hw *hw, u32 queues, bool drop)
+static void rtl_op_flush(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
+                        u32 queues, bool drop)
 {
        struct rtl_priv *rtlpriv = rtl_priv(hw);
 
index 1b4101bf9974e8243124f70f40e295e841a3f0a1..347af1e4f438e57cf2c37b8975169a7c92941681 100644 (file)
@@ -93,7 +93,6 @@ int rtl88e_init_sw_vars(struct ieee80211_hw *hw)
        u8 tid;
 
        rtl8188ee_bt_reg_init(hw);
-       rtlpci->msi_support = true;
 
        rtlpriv->dm.dm_initialgain_enable = 1;
        rtlpriv->dm.dm_flag = 0;
index 06ef47cd62038cc9695078441d545be0ca1b9348..5b4c225396f244cea599bade9755951daa7d15ec 100644 (file)
@@ -293,7 +293,7 @@ static void _rtl88ee_translate_rx_signal_stuff(struct ieee80211_hw *hw,
        u8 *psaddr;
        __le16 fc;
        u16 type, ufc;
-       bool match_bssid, packet_toself, packet_beacon, addr;
+       bool match_bssid, packet_toself, packet_beacon = false, addr;
 
        tmp_buf = skb->data + pstatus->rx_drvinfo_size + pstatus->rx_bufshift;
 
index 68b5c7e92cfbc2c6a76580a2d1dbb3297730c64f..07cb06da67297244131394e60fa19c9ddf86044e 100644 (file)
@@ -1001,7 +1001,7 @@ int rtl92cu_hw_init(struct ieee80211_hw *hw)
        err = _rtl92cu_init_mac(hw);
        if (err) {
                RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG, "init mac failed!\n");
-               return err;
+               goto exit;
        }
        err = rtl92c_download_fw(hw);
        if (err) {
index 36b48be8329c08dad5474f43600f2b11d8fcf279..2b3c78baa9f8b3742020aa377b0449785eeb8ab7 100644 (file)
@@ -49,6 +49,12 @@ static u8 _rtl92se_map_hwqueue_to_fwqueue(struct sk_buff *skb,       u8 skb_queue)
        if (ieee80211_is_nullfunc(fc))
                return QSLT_HIGH;
 
+       /* Kernel commit 1bf4bbb4024dcdab changed EAPOL packets to use
+        * queue V0 at priority 7; however, the RTL8192SE appears to have
+        * that queue at priority 6
+        */
+       if (skb->priority == 7)
+               return QSLT_VO;
        return skb->priority;
 }
 
index b4577ebc4bb0bb5c16d068039cddc7f3fe41b744..a07213645da013157a858c78a84e300f03750eee 100644 (file)
@@ -92,7 +92,7 @@ int rtl8723be_init_sw_vars(struct ieee80211_hw *hw)
        struct rtl_mac *mac = rtl_mac(rtl_priv(hw));
 
        rtl8723be_bt_reg_init(hw);
-       rtlpci->msi_support = true;
+       rtlpci->msi_support = false;
        rtlpriv->btcoexist.btc_ops = rtl_btc_get_ops_pointer();
 
        rtlpriv->dm.dm_initialgain_enable = 1;
index ed88d39134839e34510d83252949dbbb2a964e83..077eb5b9cd74c7a119c1e74ebbb109477a857ff3 100644 (file)
@@ -5184,7 +5184,8 @@ out:
        mutex_unlock(&wl->mutex);
 }
 
-static void wlcore_op_flush(struct ieee80211_hw *hw, u32 queues, bool drop)
+static void wlcore_op_flush(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
+                           u32 queues, bool drop)
 {
        struct wl1271 *wl = hw->priv;
 
index 29ef2492951fcdaa372611449fbc4c3cde4322f1..d3dd7bfdf3f1f33efbfbd9e0be762f664c892926 100644 (file)
@@ -217,7 +217,7 @@ static struct wl1271_if_operations sdio_ops = {
 static int wl1271_probe(struct sdio_func *func,
                                  const struct sdio_device_id *id)
 {
-       struct wlcore_platdev_data *pdev_data;
+       struct wlcore_platdev_data pdev_data;
        struct wl12xx_sdio_glue *glue;
        struct resource res[1];
        mmc_pm_flag_t mmcflags;
@@ -228,16 +228,13 @@ static int wl1271_probe(struct sdio_func *func,
        if (func->num != 0x02)
                return -ENODEV;
 
-       pdev_data = kzalloc(sizeof(*pdev_data), GFP_KERNEL);
-       if (!pdev_data)
-               goto out;
-
-       pdev_data->if_ops = &sdio_ops;
+       memset(&pdev_data, 0x00, sizeof(pdev_data));
+       pdev_data.if_ops = &sdio_ops;
 
        glue = kzalloc(sizeof(*glue), GFP_KERNEL);
        if (!glue) {
                dev_err(&func->dev, "can't allocate glue\n");
-               goto out_free_pdev_data;
+               goto out;
        }
 
        glue->dev = &func->dev;
@@ -248,9 +245,9 @@ static int wl1271_probe(struct sdio_func *func,
        /* Use block mode for transferring over one block size of data */
        func->card->quirks |= MMC_QUIRK_BLKSZ_FOR_BYTE_MODE;
 
-       pdev_data->pdata = wl12xx_get_platform_data();
-       if (IS_ERR(pdev_data->pdata)) {
-               ret = PTR_ERR(pdev_data->pdata);
+       pdev_data.pdata = wl12xx_get_platform_data();
+       if (IS_ERR(pdev_data.pdata)) {
+               ret = PTR_ERR(pdev_data.pdata);
                dev_err(glue->dev, "missing wlan platform data: %d\n", ret);
                goto out_free_glue;
        }
@@ -260,7 +257,7 @@ static int wl1271_probe(struct sdio_func *func,
        dev_dbg(glue->dev, "sdio PM caps = 0x%x\n", mmcflags);
 
        if (mmcflags & MMC_PM_KEEP_POWER)
-               pdev_data->pdata->pwr_in_suspend = true;
+               pdev_data.pdata->pwr_in_suspend = true;
 
        sdio_set_drvdata(func, glue);
 
@@ -289,7 +286,7 @@ static int wl1271_probe(struct sdio_func *func,
 
        memset(res, 0x00, sizeof(res));
 
-       res[0].start = pdev_data->pdata->irq;
+       res[0].start = pdev_data.pdata->irq;
        res[0].flags = IORESOURCE_IRQ;
        res[0].name = "irq";
 
@@ -299,8 +296,8 @@ static int wl1271_probe(struct sdio_func *func,
                goto out_dev_put;
        }
 
-       ret = platform_device_add_data(glue->core, pdev_data,
-                                      sizeof(*pdev_data));
+       ret = platform_device_add_data(glue->core, &pdev_data,
+                                      sizeof(pdev_data));
        if (ret) {
                dev_err(glue->dev, "can't add platform data\n");
                goto out_dev_put;
@@ -319,9 +316,6 @@ out_dev_put:
 out_free_glue:
        kfree(glue);
 
-out_free_pdev_data:
-       kfree(pdev_data);
-
 out:
        return ret;
 }
index dbe826dd7c23c49a38a08988cb24c50764d3efaa..5f3a389dd74cdf573c6d08188fb70edcba10ccfa 100644 (file)
@@ -327,27 +327,25 @@ static struct wl1271_if_operations spi_ops = {
 static int wl1271_probe(struct spi_device *spi)
 {
        struct wl12xx_spi_glue *glue;
-       struct wlcore_platdev_data *pdev_data;
+       struct wlcore_platdev_data pdev_data;
        struct resource res[1];
        int ret = -ENOMEM;
 
-       pdev_data = kzalloc(sizeof(*pdev_data), GFP_KERNEL);
-       if (!pdev_data)
-               goto out;
+       memset(&pdev_data, 0x00, sizeof(pdev_data));
 
-       pdev_data->pdata = dev_get_platdata(&spi->dev);
-       if (!pdev_data->pdata) {
+       pdev_data.pdata = dev_get_platdata(&spi->dev);
+       if (!pdev_data.pdata) {
                dev_err(&spi->dev, "no platform data\n");
                ret = -ENODEV;
-               goto out_free_pdev_data;
+               goto out;
        }
 
-       pdev_data->if_ops = &spi_ops;
+       pdev_data.if_ops = &spi_ops;
 
        glue = kzalloc(sizeof(*glue), GFP_KERNEL);
        if (!glue) {
                dev_err(&spi->dev, "can't allocate glue\n");
-               goto out_free_pdev_data;
+               goto out;
        }
 
        glue->dev = &spi->dev;
@@ -385,8 +383,8 @@ static int wl1271_probe(struct spi_device *spi)
                goto out_dev_put;
        }
 
-       ret = platform_device_add_data(glue->core, pdev_data,
-                                      sizeof(*pdev_data));
+       ret = platform_device_add_data(glue->core, &pdev_data,
+                                      sizeof(pdev_data));
        if (ret) {
                dev_err(glue->dev, "can't add platform data\n");
                goto out_dev_put;
@@ -406,9 +404,6 @@ out_dev_put:
 out_free_glue:
        kfree(glue);
 
-out_free_pdev_data:
-       kfree(pdev_data);
-
 out:
        return ret;
 }
index ef05c5c49d413d5bb23a3e4adbff88cb0c9ad7cd..a7557331699f9c92d13ab2c75ab71f7f3c31df17 100644 (file)
@@ -386,7 +386,7 @@ struct xenvif *xenvif_alloc(struct device *parent, domid_t domid,
                NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM |
                NETIF_F_TSO | NETIF_F_TSO6;
        dev->features = dev->hw_features | NETIF_F_RXCSUM;
-       SET_ETHTOOL_OPS(dev, &xenvif_ethtool_ops);
+       dev->ethtool_ops = &xenvif_ethtool_ops;
 
        dev->tx_queue_len = XENVIF_QUEUE_LENGTH;
 
index 158b5e639fc7307d5a98580cfd65ca23b9d3db8a..895355de8ac467bdc9a04646becf0f5e85e136a1 100644 (file)
@@ -1332,7 +1332,7 @@ static struct net_device *xennet_create_dev(struct xenbus_device *dev)
          */
        netdev->features |= netdev->hw_features;
 
-       SET_ETHTOOL_OPS(netdev, &xennet_ethtool_ops);
+       netdev->ethtool_ops = &xennet_ethtool_ops;
        SET_NETDEV_DEV(netdev, &dev->dev);
 
        netif_set_gso_max_size(netdev, XEN_NETIF_MAX_TX_SIZE - MAX_TCP_HEADER);
index 9bcf2cf19357837dcb6dd8e5cce637150a7a32d0..5aeb89411350a4c98a2d769302e16a8d650d075f 100644 (file)
@@ -364,7 +364,7 @@ int of_irq_to_resource(struct device_node *dev, int index, struct resource *r)
 
                memset(r, 0, sizeof(*r));
                /*
-                * Get optional "interrupts-names" property to add a name
+                * Get optional "interrupt-names" property to add a name
                 * to the resource.
                 */
                of_property_read_string_index(dev, "interrupt-names", index,
@@ -379,6 +379,32 @@ int of_irq_to_resource(struct device_node *dev, int index, struct resource *r)
 }
 EXPORT_SYMBOL_GPL(of_irq_to_resource);
 
+/**
+ * of_irq_get - Decode a node's IRQ and return it as a Linux irq number
+ * @dev: pointer to device tree node
+ * @index: zero-based index of the irq
+ *
+ * Returns Linux irq number on success, or -EPROBE_DEFER if the irq domain
+ * is not yet created.
+ *
+ */
+int of_irq_get(struct device_node *dev, int index)
+{
+       int rc;
+       struct of_phandle_args oirq;
+       struct irq_domain *domain;
+
+       rc = of_irq_parse_one(dev, index, &oirq);
+       if (rc)
+               return rc;
+
+       domain = irq_find_host(oirq.np);
+       if (!domain)
+               return -EPROBE_DEFER;
+
+       return irq_create_of_mapping(&oirq);
+}
+
 /**
  * of_irq_count - Count the number of IRQs a node uses
  * @dev: pointer to device tree node
index 9a95831bd065c2ba1c5af83f6a73927a3b9d8181..b85709458639b0b9df9d3fe1da54e766b721a17d 100644 (file)
@@ -14,6 +14,7 @@
 #include <linux/netdevice.h>
 #include <linux/err.h>
 #include <linux/phy.h>
+#include <linux/phy_fixed.h>
 #include <linux/of.h>
 #include <linux/of_irq.h>
 #include <linux/of_mdio.h>
 MODULE_AUTHOR("Grant Likely <grant.likely@secretlab.ca>");
 MODULE_LICENSE("GPL");
 
-static void of_set_phy_supported(struct phy_device *phydev, u32 max_speed)
-{
-       /* The default values for phydev->supported are provided by the PHY
-        * driver "features" member, we want to reset to sane defaults fist
-        * before supporting higher speeds.
-        */
-       phydev->supported &= PHY_DEFAULT_FEATURES;
-
-       switch (max_speed) {
-       default:
-               return;
-
-       case SPEED_1000:
-               phydev->supported |= PHY_1000BT_FEATURES;
-       case SPEED_100:
-               phydev->supported |= PHY_100BT_FEATURES;
-       case SPEED_10:
-               phydev->supported |= PHY_10BT_FEATURES;
-       }
-}
-
 /* Extract the clause 22 phy ID from the compatible string of the form
  * ethernet-phy-idAAAA.BBBB */
 static int of_get_phy_id(struct device_node *device, u32 *phy_id)
@@ -103,11 +83,6 @@ static int of_mdiobus_register_phy(struct mii_bus *mdio, struct device_node *chi
                return 1;
        }
 
-       /* Set phydev->supported based on the "max-speed" property
-        * if present */
-       if (!of_property_read_u32(child, "max-speed", &max_speed))
-               of_set_phy_supported(phy, max_speed);
-
        dev_dbg(&mdio->dev, "registered phy %s at address %i\n",
                child->name, addr);
 
@@ -244,44 +219,6 @@ struct phy_device *of_phy_connect(struct net_device *dev,
 }
 EXPORT_SYMBOL(of_phy_connect);
 
-/**
- * of_phy_connect_fixed_link - Parse fixed-link property and return a dummy phy
- * @dev: pointer to net_device claiming the phy
- * @hndlr: Link state callback for the network device
- * @iface: PHY data interface type
- *
- * This function is a temporary stop-gap and will be removed soon.  It is
- * only to support the fs_enet, ucc_geth and gianfar Ethernet drivers.  Do
- * not call this function from new drivers.
- */
-struct phy_device *of_phy_connect_fixed_link(struct net_device *dev,
-                                            void (*hndlr)(struct net_device *),
-                                            phy_interface_t iface)
-{
-       struct device_node *net_np;
-       char bus_id[MII_BUS_ID_SIZE + 3];
-       struct phy_device *phy;
-       const __be32 *phy_id;
-       int sz;
-
-       if (!dev->dev.parent)
-               return NULL;
-
-       net_np = dev->dev.parent->of_node;
-       if (!net_np)
-               return NULL;
-
-       phy_id = of_get_property(net_np, "fixed-link", &sz);
-       if (!phy_id || sz < sizeof(*phy_id))
-               return NULL;
-
-       sprintf(bus_id, PHY_ID_FMT, "fixed-0", be32_to_cpu(phy_id[0]));
-
-       phy = phy_connect(dev, bus_id, hndlr, iface);
-       return IS_ERR(phy) ? NULL : phy;
-}
-EXPORT_SYMBOL(of_phy_connect_fixed_link);
-
 /**
  * of_phy_attach - Attach to a PHY without starting the state machine
  * @dev: pointer to net_device claiming the phy
@@ -301,3 +238,69 @@ struct phy_device *of_phy_attach(struct net_device *dev,
        return phy_attach_direct(dev, phy, flags, iface) ? NULL : phy;
 }
 EXPORT_SYMBOL(of_phy_attach);
+
+#if defined(CONFIG_FIXED_PHY)
+/*
+ * of_phy_is_fixed_link() and of_phy_register_fixed_link() must
+ * support two DT bindings:
+ * - the old DT binding, where 'fixed-link' was a property with 5
+ *   cells encoding various informations about the fixed PHY
+ * - the new DT binding, where 'fixed-link' is a sub-node of the
+ *   Ethernet device.
+ */
+bool of_phy_is_fixed_link(struct device_node *np)
+{
+       struct device_node *dn;
+       int len;
+
+       /* New binding */
+       dn = of_get_child_by_name(np, "fixed-link");
+       if (dn) {
+               of_node_put(dn);
+               return true;
+       }
+
+       /* Old binding */
+       if (of_get_property(np, "fixed-link", &len) &&
+           len == (5 * sizeof(__be32)))
+               return true;
+
+       return false;
+}
+EXPORT_SYMBOL(of_phy_is_fixed_link);
+
+int of_phy_register_fixed_link(struct device_node *np)
+{
+       struct fixed_phy_status status = {};
+       struct device_node *fixed_link_node;
+       const __be32 *fixed_link_prop;
+       int len;
+
+       /* New binding */
+       fixed_link_node = of_get_child_by_name(np, "fixed-link");
+       if (fixed_link_node) {
+               status.link = 1;
+               status.duplex = of_property_read_bool(np, "full-duplex");
+               if (of_property_read_u32(fixed_link_node, "speed", &status.speed))
+                       return -EINVAL;
+               status.pause = of_property_read_bool(np, "pause");
+               status.asym_pause = of_property_read_bool(np, "asym-pause");
+               of_node_put(fixed_link_node);
+               return fixed_phy_register(PHY_POLL, &status, np);
+       }
+
+       /* Old binding */
+       fixed_link_prop = of_get_property(np, "fixed-link", &len);
+       if (fixed_link_prop && len == (5 * sizeof(__be32))) {
+               status.link = 1;
+               status.duplex = be32_to_cpu(fixed_link_prop[1]);
+               status.speed = be32_to_cpu(fixed_link_prop[2]);
+               status.pause = be32_to_cpu(fixed_link_prop[3]);
+               status.asym_pause = be32_to_cpu(fixed_link_prop[4]);
+               return fixed_phy_register(PHY_POLL, &status, np);
+       }
+
+       return -ENODEV;
+}
+EXPORT_SYMBOL(of_phy_register_fixed_link);
+#endif
index 404d1daebefa7d7a5d02cbfb611e9585077c00b0..bd47fbc53dc96258fba942d073ee320f25e5627f 100644 (file)
@@ -168,7 +168,9 @@ struct platform_device *of_device_alloc(struct device_node *np,
                        rc = of_address_to_resource(np, i, res);
                        WARN_ON(rc);
                }
-               WARN_ON(of_irq_to_resource_table(np, res, num_irq) != num_irq);
+               if (of_irq_to_resource_table(np, res, num_irq) != num_irq)
+                       pr_debug("not all legacy IRQ resources mapped for %s\n",
+                                np->name);
        }
 
        dev->dev.of_node = of_node_get(np);
index ae4450070503f1067579f02576ce5ec14b2a7192..fe70b86bcffb9d086edd51c758a17a60ca45cf7b 100644 (file)
@@ -10,6 +10,7 @@
 #include <linux/module.h>
 #include <linux/of.h>
 #include <linux/of_irq.h>
+#include <linux/of_platform.h>
 #include <linux/list.h>
 #include <linux/mutex.h>
 #include <linux/slab.h>
@@ -427,6 +428,36 @@ static void __init of_selftest_match_node(void)
        }
 }
 
+static void __init of_selftest_platform_populate(void)
+{
+       int irq;
+       struct device_node *np;
+       struct platform_device *pdev;
+
+       np = of_find_node_by_path("/testcase-data");
+       of_platform_populate(np, of_default_bus_match_table, NULL, NULL);
+
+       /* Test that a missing irq domain returns -EPROBE_DEFER */
+       np = of_find_node_by_path("/testcase-data/testcase-device1");
+       pdev = of_find_device_by_node(np);
+       if (!pdev)
+               selftest(0, "device 1 creation failed\n");
+       irq = platform_get_irq(pdev, 0);
+       if (irq != -EPROBE_DEFER)
+               selftest(0, "device deferred probe failed - %d\n", irq);
+
+       /* Test that a parsing failure does not return -EPROBE_DEFER */
+       np = of_find_node_by_path("/testcase-data/testcase-device2");
+       pdev = of_find_device_by_node(np);
+       if (!pdev)
+               selftest(0, "device 2 creation failed\n");
+       irq = platform_get_irq(pdev, 0);
+       if (irq >= 0 || irq == -EPROBE_DEFER)
+               selftest(0, "device parsing error failed - %d\n", irq);
+
+       selftest(1, "passed");
+}
+
 static int __init of_selftest(void)
 {
        struct device_node *np;
@@ -445,6 +476,7 @@ static int __init of_selftest(void)
        of_selftest_parse_interrupts();
        of_selftest_parse_interrupts_extended();
        of_selftest_match_node();
+       of_selftest_platform_populate();
        pr_info("end of selftest - %i passed, %i failed\n",
                selftest_results.passed, selftest_results.failed);
        return 0;
index c843720bd3e53d7e7f632f4c75152edbefbc7d5f..da4695f60351ad4c7c24aecf791a5a6c2b71edfc 100644 (file)
                                                      <&test_intmap1 1 2>;
                        };
                };
+
+               testcase-device1 {
+                       compatible = "testcase-device";
+                       interrupt-parent = <&test_intc0>;
+                       interrupts = <1>;
+               };
+
+               testcase-device2 {
+                       compatible = "testcase-device";
+                       interrupt-parent = <&test_intc2>;
+                       interrupts = <1>; /* invalid specifier - too short */
+               };
        };
+
 };
index 3bb05f17b9b4edcd05cc15e582aa049e64d01df5..4906c27fa3bd9f5a8fb4da35981fabd170ccbc60 100644 (file)
@@ -33,6 +33,7 @@ config PHY_MVEBU_SATA
 
 config OMAP_CONTROL_PHY
        tristate "OMAP CONTROL PHY Driver"
+       depends on ARCH_OMAP2PLUS || COMPILE_TEST
        help
          Enable this to add support for the PHY part present in the control
          module. This driver has API to power on the USB2 PHY and to write to
index 2faf78edc8649f2c5ef79c3093aaa14de04b0eea..7728518572a4ea4c0fdb9e50ccde640c654d47b1 100644 (file)
@@ -13,8 +13,9 @@ obj-$(CONFIG_TI_PIPE3)                        += phy-ti-pipe3.o
 obj-$(CONFIG_TWL4030_USB)              += phy-twl4030-usb.o
 obj-$(CONFIG_PHY_EXYNOS5250_SATA)      += phy-exynos5250-sata.o
 obj-$(CONFIG_PHY_SUN4I_USB)            += phy-sun4i-usb.o
-obj-$(CONFIG_PHY_SAMSUNG_USB2)         += phy-samsung-usb2.o
-obj-$(CONFIG_PHY_EXYNOS4210_USB2)      += phy-exynos4210-usb2.o
-obj-$(CONFIG_PHY_EXYNOS4X12_USB2)      += phy-exynos4x12-usb2.o
-obj-$(CONFIG_PHY_EXYNOS5250_USB2)      += phy-exynos5250-usb2.o
+obj-$(CONFIG_PHY_SAMSUNG_USB2)         += phy-exynos-usb2.o
+phy-exynos-usb2-y                      += phy-samsung-usb2.o
+phy-exynos-usb2-$(CONFIG_PHY_EXYNOS4210_USB2)  += phy-exynos4210-usb2.o
+phy-exynos-usb2-$(CONFIG_PHY_EXYNOS4X12_USB2)  += phy-exynos4x12-usb2.o
+phy-exynos-usb2-$(CONFIG_PHY_EXYNOS5250_USB2)  += phy-exynos5250-usb2.o
 obj-$(CONFIG_PHY_XGENE)                        += phy-xgene.o
index 623b71c54b3e5f0a865d8563c8557919e9603900..c64a2f3b2d624fb9d8c266b979416d6b53ed5b9f 100644 (file)
@@ -64,6 +64,9 @@ static struct phy *phy_lookup(struct device *device, const char *port)
        class_dev_iter_init(&iter, phy_class, NULL, NULL);
        while ((dev = class_dev_iter_next(&iter))) {
                phy = to_phy(dev);
+
+               if (!phy->init_data)
+                       continue;
                count = phy->init_data->num_consumers;
                consumers = phy->init_data->consumers;
                while (count--) {
index 92ed4b2e3c0716cf3f21580b49434fc6e9952fc6..c862f9c0e9ce4cb356222b496c40ca1ac26b0699 100644 (file)
@@ -64,7 +64,6 @@ struct as3722_pin_function {
 };
 
 struct as3722_gpio_pin_control {
-       bool enable_gpio_invert;
        unsigned mode_prop;
        int io_function;
 };
@@ -320,10 +319,8 @@ static int as3722_pinctrl_gpio_set_direction(struct pinctrl_dev *pctldev,
                return mode;
        }
 
-       if (as_pci->gpio_control[offset].enable_gpio_invert)
-               mode |= AS3722_GPIO_INV;
-
-       return as3722_write(as3722, AS3722_GPIOn_CONTROL_REG(offset), mode);
+       return as3722_update_bits(as3722, AS3722_GPIOn_CONTROL_REG(offset),
+                               AS3722_GPIO_MODE_MASK, mode);
 }
 
 static const struct pinmux_ops as3722_pinmux_ops = {
@@ -496,10 +493,18 @@ static void as3722_gpio_set(struct gpio_chip *chip, unsigned offset,
 {
        struct as3722_pctrl_info *as_pci = to_as_pci(chip);
        struct as3722 *as3722 = as_pci->as3722;
-       int en_invert = as_pci->gpio_control[offset].enable_gpio_invert;
+       int en_invert;
        u32 val;
        int ret;
 
+       ret = as3722_read(as3722, AS3722_GPIOn_CONTROL_REG(offset), &val);
+       if (ret < 0) {
+               dev_err(as_pci->dev,
+                       "GPIO_CONTROL%d_REG read failed: %d\n", offset, ret);
+               return;
+       }
+       en_invert = !!(val & AS3722_GPIO_INV);
+
        if (value)
                val = (en_invert) ? 0 : AS3722_GPIOn_SIGNAL(offset);
        else
index 81075f2a1d3f87d9ac9d2cf4d62edf94e21bf75f..2960557bfed95c6d79f316c020ec98c426f38f83 100644 (file)
@@ -810,6 +810,7 @@ static const struct pinconf_ops pcs_pinconf_ops = {
 static int pcs_add_pin(struct pcs_device *pcs, unsigned offset,
                unsigned pin_pos)
 {
+       struct pcs_soc_data *pcs_soc = &pcs->socdata;
        struct pinctrl_pin_desc *pin;
        struct pcs_name *pn;
        int i;
@@ -821,6 +822,18 @@ static int pcs_add_pin(struct pcs_device *pcs, unsigned offset,
                return -ENOMEM;
        }
 
+       if (pcs_soc->irq_enable_mask) {
+               unsigned val;
+
+               val = pcs->read(pcs->base + offset);
+               if (val & pcs_soc->irq_enable_mask) {
+                       dev_dbg(pcs->dev, "irq enabled at boot for pin at %lx (%x), clearing\n",
+                               (unsigned long)pcs->res->start + offset, val);
+                       val &= ~pcs_soc->irq_enable_mask;
+                       pcs->write(val, pcs->base + offset);
+               }
+       }
+
        pin = &pcs->pins.pa[i];
        pn = &pcs->names[i];
        sprintf(pn->name, "%lx.%d",
index c5e0f6973a3b06c3e197eccbe7ba917a27d1c3ec..26ca6855f478d3018f79ab8aab87550e1eb52610 100644 (file)
@@ -629,9 +629,8 @@ static int tb10x_gpio_request_enable(struct pinctrl_dev *pctl,
         */
        for (i = 0; i < state->pinfuncgrpcnt; i++) {
                const struct tb10x_pinfuncgrp *pfg = &state->pingroups[i];
-               unsigned int port = pfg->port;
                unsigned int mode = pfg->mode;
-               int j;
+               int j, port = pfg->port;
 
                /*
                 * Skip pin groups which are always mapped and don't need
index 48093719167abd91e27f93eb869eab3f1edd5c51..f5cd3f9618083bacca6414489db5e83b3dc128d9 100644 (file)
@@ -4794,8 +4794,7 @@ static const struct pinmux_cfg_reg pinmux_config_regs[] = {
                FN_MSIOF0_SCK_B, 0,
                /* IP5_23_21 [3] */
                FN_WE1_N, FN_IERX, FN_CAN1_RX, FN_VI1_G4,
-               FN_VI1_G4_B, FN_VI2_R6, FN_SCIFA0_CTS_N_B,
-               FN_IERX_C, 0,
+               FN_VI1_G4_B, FN_VI2_R6, FN_SCIFA0_CTS_N_B, FN_IERX_C,
                /* IP5_20_18 [3] */
                FN_WE0_N, FN_IECLK, FN_CAN_CLK,
                FN_VI2_VSYNC_N, FN_SCIFA0_TXD_B, FN_VI2_VSYNC_N_B, 0, 0,
index 5186d70c49d43326bc0a3e1f0405332d512cb989..7868bf3a0f911dccfbe7b516c469ac63f6422e4c 100644 (file)
@@ -5288,7 +5288,7 @@ static const struct pinmux_cfg_reg pinmux_config_regs[] = {
                /* SEL_SCIF3 [2] */
                FN_SEL_SCIF3_0, FN_SEL_SCIF3_1, FN_SEL_SCIF3_2, FN_SEL_SCIF3_3,
                /* SEL_IEB [2] */
-               FN_SEL_IEB_0, FN_SEL_IEB_1, FN_SEL_IEB_2,
+               FN_SEL_IEB_0, FN_SEL_IEB_1, FN_SEL_IEB_2, 0,
                /* SEL_MMC [1] */
                FN_SEL_MMC_0, FN_SEL_MMC_1,
                /* SEL_SCIF5 [1] */
index 9f611cbbc294ea8c5ae84023e132e02152e36f85..c31aa07b3ba55541ff434adf45aed76adb0adee3 100644 (file)
@@ -83,8 +83,7 @@ static int pnpacpi_set_resources(struct pnp_dev *dev)
 {
        struct acpi_device *acpi_dev;
        acpi_handle handle;
-       struct acpi_buffer buffer;
-       int ret;
+       int ret = 0;
 
        pnp_dbg(&dev->dev, "set resources\n");
 
@@ -97,19 +96,26 @@ static int pnpacpi_set_resources(struct pnp_dev *dev)
        if (WARN_ON_ONCE(acpi_dev != dev->data))
                dev->data = acpi_dev;
 
-       ret = pnpacpi_build_resource_template(dev, &buffer);
-       if (ret)
-               return ret;
-       ret = pnpacpi_encode_resources(dev, &buffer);
-       if (ret) {
+       if (acpi_has_method(handle, METHOD_NAME__SRS)) {
+               struct acpi_buffer buffer;
+
+               ret = pnpacpi_build_resource_template(dev, &buffer);
+               if (ret)
+                       return ret;
+
+               ret = pnpacpi_encode_resources(dev, &buffer);
+               if (!ret) {
+                       acpi_status status;
+
+                       status = acpi_set_current_resources(handle, &buffer);
+                       if (ACPI_FAILURE(status))
+                               ret = -EIO;
+               }
                kfree(buffer.pointer);
-               return ret;
        }
-       if (ACPI_FAILURE(acpi_set_current_resources(handle, &buffer)))
-               ret = -EINVAL;
-       else if (acpi_bus_power_manageable(handle))
+       if (!ret && acpi_bus_power_manageable(handle))
                ret = acpi_bus_set_power(handle, ACPI_STATE_D0);
-       kfree(buffer.pointer);
+
        return ret;
 }
 
@@ -117,7 +123,7 @@ static int pnpacpi_disable_resources(struct pnp_dev *dev)
 {
        struct acpi_device *acpi_dev;
        acpi_handle handle;
-       int ret;
+       acpi_status status;
 
        dev_dbg(&dev->dev, "disable resources\n");
 
@@ -128,13 +134,15 @@ static int pnpacpi_disable_resources(struct pnp_dev *dev)
        }
 
        /* acpi_unregister_gsi(pnp_irq(dev, 0)); */
-       ret = 0;
        if (acpi_bus_power_manageable(handle))
                acpi_bus_set_power(handle, ACPI_STATE_D3_COLD);
-               /* continue even if acpi_bus_set_power() fails */
-       if (ACPI_FAILURE(acpi_evaluate_object(handle, "_DIS", NULL, NULL)))
-               ret = -ENODEV;
-       return ret;
+
+       /* continue even if acpi_bus_set_power() fails */
+       status = acpi_evaluate_object(handle, "_DIS", NULL, NULL);
+       if (ACPI_FAILURE(status) && status != AE_NOT_FOUND)
+               return -ENODEV;
+
+       return 0;
 }
 
 #ifdef CONFIG_ACPI_SLEEP
index 258fef272ea7d6b61faa565260c8fc7b7a4f995d..ebf0d6710b5a0d4150627313a3edca99e50a8745 100644 (file)
@@ -15,6 +15,7 @@
 
 #include <linux/types.h>
 #include <linux/kernel.h>
+#include <linux/pci.h>
 #include <linux/string.h>
 #include <linux/slab.h>
 #include <linux/pnp.h>
@@ -334,6 +335,81 @@ static void quirk_amd_mmconfig_area(struct pnp_dev *dev)
 }
 #endif
 
+#ifdef CONFIG_PCI
+/* Device IDs of parts that have 32KB MCH space */
+static const unsigned int mch_quirk_devices[] = {
+       0x0154, /* Ivy Bridge */
+       0x0c00, /* Haswell */
+};
+
+static struct pci_dev *get_intel_host(void)
+{
+       int i;
+       struct pci_dev *host;
+
+       for (i = 0; i < ARRAY_SIZE(mch_quirk_devices); i++) {
+               host = pci_get_device(PCI_VENDOR_ID_INTEL, mch_quirk_devices[i],
+                                     NULL);
+               if (host)
+                       return host;
+       }
+       return NULL;
+}
+
+static void quirk_intel_mch(struct pnp_dev *dev)
+{
+       struct pci_dev *host;
+       u32 addr_lo, addr_hi;
+       struct pci_bus_region region;
+       struct resource mch;
+       struct pnp_resource *pnp_res;
+       struct resource *res;
+
+       host = get_intel_host();
+       if (!host)
+               return;
+
+       /*
+        * MCHBAR is not an architected PCI BAR, so MCH space is usually
+        * reported as a PNP0C02 resource.  The MCH space was originally
+        * 16KB, but is 32KB in newer parts.  Some BIOSes still report a
+        * PNP0C02 resource that is only 16KB, which means the rest of the
+        * MCH space is consumed but unreported.
+        */
+
+       /*
+        * Read MCHBAR for Host Member Mapped Register Range Base
+        * https://www-ssl.intel.com/content/www/us/en/processors/core/4th-gen-core-family-desktop-vol-2-datasheet
+        * Sec 3.1.12.
+        */
+       pci_read_config_dword(host, 0x48, &addr_lo);
+       region.start = addr_lo & ~0x7fff;
+       pci_read_config_dword(host, 0x4c, &addr_hi);
+       region.start |= (u64) addr_hi << 32;
+       region.end = region.start + 32*1024 - 1;
+
+       memset(&mch, 0, sizeof(mch));
+       mch.flags = IORESOURCE_MEM;
+       pcibios_bus_to_resource(host->bus, &mch, &region);
+
+       list_for_each_entry(pnp_res, &dev->resources, list) {
+               res = &pnp_res->res;
+               if (res->end < mch.start || res->start > mch.end)
+                       continue;       /* no overlap */
+               if (res->start == mch.start && res->end == mch.end)
+                       continue;       /* exact match */
+
+               dev_info(&dev->dev, FW_BUG "PNP resource %pR covers only part of %s Intel MCH; extending to %pR\n",
+                        res, pci_name(host), &mch);
+               res->start = mch.start;
+               res->end = mch.end;
+               break;
+       }
+
+       pci_dev_put(host);
+}
+#endif
+
 /*
  *  PnP Quirks
  *  Cards or devices that need some tweaking due to incomplete resource info
@@ -363,6 +439,9 @@ static struct pnp_fixup pnp_fixups[] = {
        {"PNP0c02", quirk_system_pci_resources},
 #ifdef CONFIG_AMD_NB
        {"PNP0c01", quirk_amd_mmconfig_area},
+#endif
+#ifdef CONFIG_PCI
+       {"PNP0c02", quirk_intel_mch},
 #endif
        {""}
 };
index 476aa495c110d5814fdf2e3c4ba331cb026baf4f..b95cf71ed69554e8b7c53d77271ba92e39b1e039 100644 (file)
@@ -11,7 +11,7 @@
  * Copyright (C) 2012 ARM Limited
  */
 
-#include <linux/jiffies.h>
+#include <linux/delay.h>
 #include <linux/of.h>
 #include <linux/of_device.h>
 #include <linux/platform_device.h>
 static void vexpress_reset_do(struct device *dev, const char *what)
 {
        int err = -ENOENT;
-       struct vexpress_config_func *func =
-                       vexpress_config_func_get_by_dev(dev);
+       struct vexpress_config_func *func = dev_get_drvdata(dev);
 
        if (func) {
-               unsigned long timeout;
-
                err = vexpress_config_write(func, 0, 0);
-
-               timeout = jiffies + HZ;
-               while (time_before(jiffies, timeout))
-                       cpu_relax();
+               if (!err)
+                       mdelay(1000);
        }
 
        dev_emerg(dev, "Unable to %s (%d)\n", what, err);
@@ -96,12 +91,18 @@ static int vexpress_reset_probe(struct platform_device *pdev)
        enum vexpress_reset_func func;
        const struct of_device_id *match =
                        of_match_device(vexpress_reset_of_match, &pdev->dev);
+       struct vexpress_config_func *config_func;
 
        if (match)
                func = (enum vexpress_reset_func)match->data;
        else
                func = pdev->id_entry->driver_data;
 
+       config_func = vexpress_config_func_get_by_dev(&pdev->dev);
+       if (!config_func)
+               return -EINVAL;
+       dev_set_drvdata(&pdev->dev, config_func);
+
        switch (func) {
        case FUNC_SHUTDOWN:
                vexpress_power_off_device = &pdev->dev;
index 6963bdf5417593921122694d8ae425ff7a599f7e..6aea373547f65f3743faa7236b5035a83e178966 100644 (file)
@@ -6,6 +6,7 @@ menu "PTP clock support"
 
 config PTP_1588_CLOCK
        tristate "PTP clock support"
+       depends on NET
        select PPS
        select NET_PTP_CLASSIFY
        help
@@ -74,7 +75,7 @@ config DP83640_PHY
 config PTP_1588_CLOCK_PCH
        tristate "Intel PCH EG20T as PTP clock"
        depends on X86 || COMPILE_TEST
-       depends on HAS_IOMEM
+       depends on HAS_IOMEM && NET
        select PTP_1588_CLOCK
        help
          This driver adds support for using the PCH EG20T as a PTP
index e25d2bc898e5b6e4eb7b43e6df87129a2a19781e..296b0ec8744da915763f8444c2ae8e902376c33e 100644 (file)
@@ -142,7 +142,10 @@ static int ptp_clock_adjtime(struct posix_clock *pc, struct timex *tx)
                delta = ktime_to_ns(kt);
                err = ops->adjtime(ops, delta);
        } else if (tx->modes & ADJ_FREQUENCY) {
-               err = ops->adjfreq(ops, scaled_ppm_to_ppb(tx->freq));
+               s32 ppb = scaled_ppm_to_ppb(tx->freq);
+               if (ppb > ops->max_adj || ppb < -ops->max_adj)
+                       return -ERANGE;
+               err = ops->adjfreq(ops, ppb);
                ptp->dialed_frequency = tx->freq;
        } else if (tx->modes == 0) {
                tx->freq = ptp->dialed_frequency;
index ded3b35742091dac92192c45f1c0e7f24565f924..6d38be3d970ca72b48d0a44c950da341071edb53 100644 (file)
@@ -38,66 +38,24 @@ struct pbias_reg_info {
 struct pbias_regulator_data {
        struct regulator_desc desc;
        void __iomem *pbias_addr;
-       unsigned int pbias_reg;
        struct regulator_dev *dev;
        struct regmap *syscon;
        const struct pbias_reg_info *info;
        int voltage;
 };
 
-static int pbias_regulator_set_voltage(struct regulator_dev *dev,
-                       int min_uV, int max_uV, unsigned *selector)
-{
-       struct pbias_regulator_data *data = rdev_get_drvdata(dev);
-       const struct pbias_reg_info *info = data->info;
-       int ret, vmode;
-
-       if (min_uV <= 1800000)
-               vmode = 0;
-       else if (min_uV > 1800000)
-               vmode = info->vmode;
-
-       ret = regmap_update_bits(data->syscon, data->pbias_reg,
-                                               info->vmode, vmode);
-
-       return ret;
-}
-
-static int pbias_regulator_get_voltage(struct regulator_dev *rdev)
-{
-       struct pbias_regulator_data *data = rdev_get_drvdata(rdev);
-       const struct pbias_reg_info *info = data->info;
-       int value, voltage;
-
-       regmap_read(data->syscon, data->pbias_reg, &value);
-       value &= info->vmode;
-
-       voltage = value ? 3000000 : 1800000;
-
-       return voltage;
-}
+static const unsigned int pbias_volt_table[] = {
+       1800000,
+       3000000
+};
 
 static int pbias_regulator_enable(struct regulator_dev *rdev)
 {
        struct pbias_regulator_data *data = rdev_get_drvdata(rdev);
        const struct pbias_reg_info *info = data->info;
-       int ret;
-
-       ret = regmap_update_bits(data->syscon, data->pbias_reg,
-                                       info->enable_mask, info->enable);
-
-       return ret;
-}
-
-static int pbias_regulator_disable(struct regulator_dev *rdev)
-{
-       struct pbias_regulator_data *data = rdev_get_drvdata(rdev);
-       const struct pbias_reg_info *info = data->info;
-       int ret;
 
-       ret = regmap_update_bits(data->syscon, data->pbias_reg,
-                                               info->enable_mask, 0);
-       return ret;
+       return regmap_update_bits(data->syscon, rdev->desc->enable_reg,
+                                 info->enable_mask, info->enable);
 }
 
 static int pbias_regulator_is_enable(struct regulator_dev *rdev)
@@ -106,17 +64,18 @@ static int pbias_regulator_is_enable(struct regulator_dev *rdev)
        const struct pbias_reg_info *info = data->info;
        int value;
 
-       regmap_read(data->syscon, data->pbias_reg, &value);
+       regmap_read(data->syscon, rdev->desc->enable_reg, &value);
 
-       return (value & info->enable_mask) == info->enable_mask;
+       return (value & info->enable_mask) == info->enable;
 }
 
 static struct regulator_ops pbias_regulator_voltage_ops = {
-       .set_voltage    = pbias_regulator_set_voltage,
-       .get_voltage    = pbias_regulator_get_voltage,
-       .enable         = pbias_regulator_enable,
-       .disable        = pbias_regulator_disable,
-       .is_enabled     = pbias_regulator_is_enable,
+       .list_voltage = regulator_list_voltage_table,
+       .get_voltage_sel = regulator_get_voltage_sel_regmap,
+       .set_voltage_sel = regulator_set_voltage_sel_regmap,
+       .enable = pbias_regulator_enable,
+       .disable = regulator_disable_regmap,
+       .is_enabled = pbias_regulator_is_enable,
 };
 
 static const struct pbias_reg_info pbias_mmc_omap2430 = {
@@ -192,6 +151,7 @@ static int pbias_regulator_probe(struct platform_device *pdev)
        if (IS_ERR(syscon))
                return PTR_ERR(syscon);
 
+       cfg.regmap = syscon;
        cfg.dev = &pdev->dev;
 
        for (idx = 0; idx < PBIAS_NUM_REGS && data_idx < count; idx++) {
@@ -207,15 +167,19 @@ static int pbias_regulator_probe(struct platform_device *pdev)
                if (!res)
                        return -EINVAL;
 
-               drvdata[data_idx].pbias_reg = res->start;
                drvdata[data_idx].syscon = syscon;
                drvdata[data_idx].info = info;
                drvdata[data_idx].desc.name = info->name;
                drvdata[data_idx].desc.owner = THIS_MODULE;
                drvdata[data_idx].desc.type = REGULATOR_VOLTAGE;
                drvdata[data_idx].desc.ops = &pbias_regulator_voltage_ops;
+               drvdata[data_idx].desc.volt_table = pbias_volt_table;
                drvdata[data_idx].desc.n_voltages = 2;
                drvdata[data_idx].desc.enable_time = info->enable_time;
+               drvdata[data_idx].desc.vsel_reg = res->start;
+               drvdata[data_idx].desc.vsel_mask = info->vmode;
+               drvdata[data_idx].desc.enable_reg = res->start;
+               drvdata[data_idx].desc.enable_mask = info->enable_mask;
 
                cfg.init_data = pbias_matches[idx].init_data;
                cfg.driver_data = &drvdata[data_idx];
index 9f0ea6cb6922619dfe04803c284002431110e11f..e3bf885f4a6c29fd77f29b51c219a47c6a04ca7e 100644 (file)
@@ -541,18 +541,27 @@ static void chsc_process_sei_nt0(struct chsc_sei_nt0_area *sei_area)
 
 static void chsc_process_event_information(struct chsc_sei *sei, u64 ntsm)
 {
-       do {
+       static int ntsm_unsupported;
+
+       while (true) {
                memset(sei, 0, sizeof(*sei));
                sei->request.length = 0x0010;
                sei->request.code = 0x000e;
-               sei->ntsm = ntsm;
+               if (!ntsm_unsupported)
+                       sei->ntsm = ntsm;
 
                if (chsc(sei))
                        break;
 
                if (sei->response.code != 0x0001) {
-                       CIO_CRW_EVENT(2, "chsc: sei failed (rc=%04x)\n",
-                                     sei->response.code);
+                       CIO_CRW_EVENT(2, "chsc: sei failed (rc=%04x, ntsm=%llx)\n",
+                                     sei->response.code, sei->ntsm);
+
+                       if (sei->response.code == 3 && sei->ntsm) {
+                               /* Fallback for old firmware. */
+                               ntsm_unsupported = 1;
+                               continue;
+                       }
                        break;
                }
 
@@ -568,7 +577,10 @@ static void chsc_process_event_information(struct chsc_sei *sei, u64 ntsm)
                        CIO_CRW_EVENT(2, "chsc: unhandled nt: %d\n", sei->nt);
                        break;
                }
-       } while (sei->u.nt0_area.flags & 0x80);
+
+               if (!(sei->u.nt0_area.flags & 0x80))
+                       break;
+       }
 }
 
 /*
index fd7b3bd807896556d0743660069aff8a44c3e072..d837c3c5330fab5c2c77548f31996db3dacad763 100644 (file)
@@ -3348,7 +3348,7 @@ static int __init claw_init(void)
        }
        CLAW_DBF_TEXT(2, setup, "init_mod");
        claw_root_dev = root_device_register("claw");
-       ret = PTR_RET(claw_root_dev);
+       ret = PTR_ERR_OR_ZERO(claw_root_dev);
        if (ret)
                goto register_err;
        ret = ccw_driver_register(&claw_ccw_driver);
index 70b3a023100ef769180d8234f2ac39c3caa91232..03b6ad035577e28553da16fbf4481d9c249a9e6d 100644 (file)
@@ -1837,7 +1837,7 @@ static int __init ctcm_init(void)
        if (ret)
                goto out_err;
        ctcm_root_dev = root_device_register("ctcm");
-       ret = PTR_RET(ctcm_root_dev);
+       ret = PTR_ERR_OR_ZERO(ctcm_root_dev);
        if (ret)
                goto register_err;
        ret = ccw_driver_register(&ctcm_ccw_driver);
index c461f2aac610ea6a8580c504c9a2fbb9dcb92979..8d5d96969c39213a06124934182d938fd834ec7a 100644 (file)
@@ -2442,7 +2442,7 @@ __init lcs_init_module(void)
        if (rc)
                goto out_err;
        lcs_root_dev = root_device_register("lcs");
-       rc = PTR_RET(lcs_root_dev);
+       rc = PTR_ERR_OR_ZERO(lcs_root_dev);
        if (rc)
                goto register_err;
        rc = ccw_driver_register(&lcs_ccw_driver);
index 5333b2c018e781541905e855c7cf3ff0a5d84a9e..a2088af51cc5d809d7513f0c0cb628c0b81910b6 100644 (file)
@@ -268,10 +268,8 @@ static inline int qeth_is_ipa_enabled(struct qeth_ipa_info *ipa,
 #define QETH_NO_PRIO_QUEUEING 0
 #define QETH_PRIO_Q_ING_PREC  1
 #define QETH_PRIO_Q_ING_TOS   2
-#define IP_TOS_LOWDELAY 0x10
-#define IP_TOS_HIGHTHROUGHPUT 0x08
-#define IP_TOS_HIGHRELIABILITY 0x04
-#define IP_TOS_NOTIMPORTANT 0x02
+#define QETH_PRIO_Q_ING_SKB   3
+#define QETH_PRIO_Q_ING_VLAN  4
 
 /* Packing */
 #define QETH_LOW_WATERMARK_PACK  2
index 22470a3b182f0ba04c9a724d1fb26fc716bc19de..34993009a9e12d67df04ff06f10a46d5dd2cb58e 100644 (file)
@@ -20,6 +20,7 @@
 #include <linux/kthread.h>
 #include <linux/slab.h>
 #include <net/iucv/af_iucv.h>
+#include <net/dsfield.h>
 
 #include <asm/ebcdic.h>
 #include <asm/io.h>
@@ -3670,42 +3671,56 @@ void qeth_qdio_output_handler(struct ccw_device *ccwdev,
 }
 EXPORT_SYMBOL_GPL(qeth_qdio_output_handler);
 
+/**
+ * Note: Function assumes that we have 4 outbound queues.
+ */
 int qeth_get_priority_queue(struct qeth_card *card, struct sk_buff *skb,
                        int ipv, int cast_type)
 {
-       if (!ipv && (card->info.type == QETH_CARD_TYPE_OSD ||
-                    card->info.type == QETH_CARD_TYPE_OSX))
-               return card->qdio.default_out_queue;
-       switch (card->qdio.no_out_queues) {
-       case 4:
-               if (cast_type && card->info.is_multicast_different)
-                       return card->info.is_multicast_different &
-                               (card->qdio.no_out_queues - 1);
-               if (card->qdio.do_prio_queueing && (ipv == 4)) {
-                       const u8 tos = ip_hdr(skb)->tos;
-
-                       if (card->qdio.do_prio_queueing ==
-                               QETH_PRIO_Q_ING_TOS) {
-                               if (tos & IP_TOS_NOTIMPORTANT)
-                                       return 3;
-                               if (tos & IP_TOS_HIGHRELIABILITY)
-                                       return 2;
-                               if (tos & IP_TOS_HIGHTHROUGHPUT)
-                                       return 1;
-                               if (tos & IP_TOS_LOWDELAY)
-                                       return 0;
-                       }
-                       if (card->qdio.do_prio_queueing ==
-                               QETH_PRIO_Q_ING_PREC)
-                               return 3 - (tos >> 6);
-               } else if (card->qdio.do_prio_queueing && (ipv == 6)) {
-                       /* TODO: IPv6!!! */
+       __be16 *tci;
+       u8 tos;
+
+       if (cast_type && card->info.is_multicast_different)
+               return card->info.is_multicast_different &
+                       (card->qdio.no_out_queues - 1);
+
+       switch (card->qdio.do_prio_queueing) {
+       case QETH_PRIO_Q_ING_TOS:
+       case QETH_PRIO_Q_ING_PREC:
+               switch (ipv) {
+               case 4:
+                       tos = ipv4_get_dsfield(ip_hdr(skb));
+                       break;
+               case 6:
+                       tos = ipv6_get_dsfield(ipv6_hdr(skb));
+                       break;
+               default:
+                       return card->qdio.default_out_queue;
                }
-               return card->qdio.default_out_queue;
-       case 1: /* fallthrough for single-out-queue 1920-device */
+               if (card->qdio.do_prio_queueing == QETH_PRIO_Q_ING_PREC)
+                       return ~tos >> 6 & 3;
+               if (tos & IPTOS_MINCOST)
+                       return 3;
+               if (tos & IPTOS_RELIABILITY)
+                       return 2;
+               if (tos & IPTOS_THROUGHPUT)
+                       return 1;
+               if (tos & IPTOS_LOWDELAY)
+                       return 0;
+               break;
+       case QETH_PRIO_Q_ING_SKB:
+               if (skb->priority > 5)
+                       return 0;
+               return ~skb->priority >> 1 & 3;
+       case QETH_PRIO_Q_ING_VLAN:
+               tci = &((struct ethhdr *)skb->data)->h_proto;
+               if (*tci == ETH_P_8021Q)
+                       return ~*(tci + 1) >> (VLAN_PRIO_SHIFT + 1) & 3;
+               break;
        default:
-               return card->qdio.default_out_queue;
+               break;
        }
+       return card->qdio.default_out_queue;
 }
 EXPORT_SYMBOL_GPL(qeth_get_priority_queue);
 
@@ -5824,7 +5839,7 @@ static int __init qeth_core_init(void)
        if (rc)
                goto out_err;
        qeth_core_root_dev = root_device_register("qeth");
-       rc = PTR_RET(qeth_core_root_dev);
+       rc = PTR_ERR_OR_ZERO(qeth_core_root_dev);
        if (rc)
                goto register_err;
        qeth_core_header_cache = kmem_cache_create("qeth_hdr",
index 425c0ecf1f3b9fd2ae3c2c55f61b1ad10f08c028..8a25a2be9890e7e09af1c9845c5b9b9773472f00 100644 (file)
@@ -217,6 +217,10 @@ static ssize_t qeth_dev_prioqing_show(struct device *dev,
                return sprintf(buf, "%s\n", "by precedence");
        case QETH_PRIO_Q_ING_TOS:
                return sprintf(buf, "%s\n", "by type of service");
+       case QETH_PRIO_Q_ING_SKB:
+               return sprintf(buf, "%s\n", "by skb-priority");
+       case QETH_PRIO_Q_ING_VLAN:
+               return sprintf(buf, "%s\n", "by VLAN headers");
        default:
                return sprintf(buf, "always queue %i\n",
                               card->qdio.default_out_queue);
@@ -250,11 +254,23 @@ static ssize_t qeth_dev_prioqing_store(struct device *dev,
        }
 
        tmp = strsep((char **) &buf, "\n");
-       if (!strcmp(tmp, "prio_queueing_prec"))
+       if (!strcmp(tmp, "prio_queueing_prec")) {
                card->qdio.do_prio_queueing = QETH_PRIO_Q_ING_PREC;
-       else if (!strcmp(tmp, "prio_queueing_tos"))
+               card->qdio.default_out_queue = QETH_DEFAULT_QUEUE;
+       } else if (!strcmp(tmp, "prio_queueing_skb")) {
+               card->qdio.do_prio_queueing = QETH_PRIO_Q_ING_SKB;
+               card->qdio.default_out_queue = QETH_DEFAULT_QUEUE;
+       } else if (!strcmp(tmp, "prio_queueing_tos")) {
                card->qdio.do_prio_queueing = QETH_PRIO_Q_ING_TOS;
-       else if (!strcmp(tmp, "no_prio_queueing:0")) {
+               card->qdio.default_out_queue = QETH_DEFAULT_QUEUE;
+       } else if (!strcmp(tmp, "prio_queueing_vlan")) {
+               if (!card->options.layer2) {
+                       rc = -ENOTSUPP;
+                       goto out;
+               }
+               card->qdio.do_prio_queueing = QETH_PRIO_Q_ING_VLAN;
+               card->qdio.default_out_queue = QETH_DEFAULT_QUEUE;
+       } else if (!strcmp(tmp, "no_prio_queueing:0")) {
                card->qdio.do_prio_queueing = QETH_NO_PRIO_QUEUEING;
                card->qdio.default_out_queue = 0;
        } else if (!strcmp(tmp, "no_prio_queueing:1")) {
index 8dea3f12ccc1714defe7d4d65869817dd6b69135..5ef5b4f45758cd226bb58becde5a3fa81ff33bb2 100644 (file)
@@ -725,15 +725,20 @@ static int qeth_l2_hard_start_xmit(struct sk_buff *skb, struct net_device *dev)
        int elements = 0;
        struct qeth_card *card = dev->ml_priv;
        struct sk_buff *new_skb = skb;
-       int ipv = qeth_get_ip_version(skb);
        int cast_type = qeth_l2_get_cast_type(card, skb);
-       struct qeth_qdio_out_q *queue = card->qdio.out_qs
-               [qeth_get_priority_queue(card, skb, ipv, cast_type)];
+       struct qeth_qdio_out_q *queue;
        int tx_bytes = skb->len;
        int data_offset = -1;
        int elements_needed = 0;
        int hd_len = 0;
 
+       if (card->qdio.do_prio_queueing || (cast_type &&
+                                       card->info.is_multicast_different))
+               queue = card->qdio.out_qs[qeth_get_priority_queue(card, skb,
+                                       qeth_get_ip_version(skb), cast_type)];
+       else
+               queue = card->qdio.out_qs[card->qdio.default_out_queue];
+
        if ((card->state != CARD_STATE_UP) || !card->lan_online) {
                card->stats.tx_carrier_errors++;
                goto tx_drop;
@@ -964,10 +969,9 @@ static int qeth_l2_setup_netdev(struct qeth_card *card)
        card->dev->watchdog_timeo = QETH_TX_TIMEOUT;
        card->dev->mtu = card->info.initial_mtu;
        card->dev->netdev_ops = &qeth_l2_netdev_ops;
-       if (card->info.type != QETH_CARD_TYPE_OSN)
-               SET_ETHTOOL_OPS(card->dev, &qeth_l2_ethtool_ops);
-       else
-               SET_ETHTOOL_OPS(card->dev, &qeth_l2_osn_ops);
+       card->dev->ethtool_ops =
+               (card->info.type != QETH_CARD_TYPE_OSN) ?
+               &qeth_l2_ethtool_ops : &qeth_l2_osn_ops;
        card->dev->features |= NETIF_F_HW_VLAN_CTAG_FILTER;
        card->info.broadcast_capable = 1;
        qeth_l2_request_initial_mac(card);
index 3524d34ff694c273afefc7d85bfa17b6bce38af9..c58f82af36585d8293ec1e7876e323b2b98d516d 100644 (file)
@@ -1659,7 +1659,7 @@ static void qeth_l3_add_vlan_mc(struct qeth_card *card)
        for_each_set_bit(vid, card->active_vlans, VLAN_N_VID) {
                struct net_device *netdev;
 
-               netdev = __vlan_find_dev_deep(card->dev, htons(ETH_P_8021Q),
+               netdev = __vlan_find_dev_deep_rcu(card->dev, htons(ETH_P_8021Q),
                                              vid);
                if (netdev == NULL ||
                    !(netdev->flags & IFF_UP))
@@ -1721,7 +1721,7 @@ static void qeth_l3_add_vlan_mc6(struct qeth_card *card)
        for_each_set_bit(vid, card->active_vlans, VLAN_N_VID) {
                struct net_device *netdev;
 
-               netdev = __vlan_find_dev_deep(card->dev, htons(ETH_P_8021Q),
+               netdev = __vlan_find_dev_deep_rcu(card->dev, htons(ETH_P_8021Q),
                                              vid);
                if (netdev == NULL ||
                    !(netdev->flags & IFF_UP))
@@ -1766,7 +1766,7 @@ static void qeth_l3_free_vlan_addresses4(struct qeth_card *card,
 
        QETH_CARD_TEXT(card, 4, "frvaddr4");
 
-       netdev = __vlan_find_dev_deep(card->dev, htons(ETH_P_8021Q), vid);
+       netdev = __vlan_find_dev_deep_rcu(card->dev, htons(ETH_P_8021Q), vid);
        if (!netdev)
                return;
        in_dev = in_dev_get(netdev);
@@ -1796,7 +1796,7 @@ static void qeth_l3_free_vlan_addresses6(struct qeth_card *card,
 
        QETH_CARD_TEXT(card, 4, "frvaddr6");
 
-       netdev = __vlan_find_dev_deep(card->dev, htons(ETH_P_8021Q), vid);
+       netdev = __vlan_find_dev_deep_rcu(card->dev, htons(ETH_P_8021Q), vid);
        if (!netdev)
                return;
        in6_dev = in6_dev_get(netdev);
@@ -2089,7 +2089,7 @@ static int qeth_l3_verify_vlan_dev(struct net_device *dev,
                struct net_device *netdev;
 
                rcu_read_lock();
-               netdev = __vlan_find_dev_deep(card->dev, htons(ETH_P_8021Q),
+               netdev = __vlan_find_dev_deep_rcu(card->dev, htons(ETH_P_8021Q),
                                              vid);
                rcu_read_unlock();
                if (netdev == dev) {
@@ -2926,8 +2926,11 @@ static int qeth_l3_hard_start_xmit(struct sk_buff *skb, struct net_device *dev)
        struct sk_buff *new_skb = NULL;
        int ipv = qeth_get_ip_version(skb);
        int cast_type = qeth_l3_get_cast_type(card, skb);
-       struct qeth_qdio_out_q *queue = card->qdio.out_qs
-               [qeth_get_priority_queue(card, skb, ipv, cast_type)];
+       struct qeth_qdio_out_q *queue =
+               card->qdio.out_qs[card->qdio.do_prio_queueing
+                       || (cast_type && card->info.is_multicast_different) ?
+                       qeth_get_priority_queue(card, skb, ipv, cast_type) :
+                       card->qdio.default_out_queue];
        int tx_bytes = skb->len;
        bool large_send;
        int data_offset = -1;
@@ -3298,7 +3301,7 @@ static int qeth_l3_setup_netdev(struct qeth_card *card)
        card->dev->ml_priv = card;
        card->dev->watchdog_timeo = QETH_TX_TIMEOUT;
        card->dev->mtu = card->info.initial_mtu;
-       SET_ETHTOOL_OPS(card->dev, &qeth_l3_ethtool_ops);
+       card->dev->ethtool_ops = &qeth_l3_ethtool_ops;
        card->dev->features |=  NETIF_F_HW_VLAN_CTAG_TX |
                                NETIF_F_HW_VLAN_CTAG_RX |
                                NETIF_F_HW_VLAN_CTAG_FILTER;
index 8cf4a0c69baf4cebfc5ecb4fb28c029c4455d886..9a6e4a2cd072421df1980edfa4c8f914398b3991 100644 (file)
@@ -7463,6 +7463,10 @@ static void hpsa_put_ctlr_into_performant_mode(struct ctlr_info *h)
        if (hpsa_simple_mode)
                return;
 
+       trans_support = readl(&(h->cfgtable->TransportSupport));
+       if (!(trans_support & PERFORMANT_MODE))
+               return;
+
        /* Check for I/O accelerator mode support */
        if (trans_support & CFGTBL_Trans_io_accel1) {
                transMethod |= CFGTBL_Trans_io_accel1 |
@@ -7479,10 +7483,6 @@ static void hpsa_put_ctlr_into_performant_mode(struct ctlr_info *h)
        }
 
        /* TODO, check that this next line h->nreply_queues is correct */
-       trans_support = readl(&(h->cfgtable->TransportSupport));
-       if (!(trans_support & PERFORMANT_MODE))
-               return;
-
        h->nreply_queues = h->msix_vector > 0 ? h->msix_vector : 1;
        hpsa_get_max_perf_mode_cmds(h);
        /* Performant mode ring buffer and supporting data structures */
index 7f0af4fcc0019127ab4d60fc0550ed6daa86ffb6..6fd7d40b2c4dea102e15a2e9c76fef3500c09435 100644 (file)
@@ -8293,7 +8293,6 @@ _scsih_suspend(struct pci_dev *pdev, pm_message_t state)
 
        mpt2sas_base_free_resources(ioc);
        pci_save_state(pdev);
-       pci_disable_device(pdev);
        pci_set_power_state(pdev, device_state);
        return 0;
 }
index 771c16bfdbac4be2fac180459ff5711407c98a01..f17aa7aa78796e7f6d358b8cd5f68fd43cfee4d4 100644 (file)
@@ -189,6 +189,7 @@ scsi_abort_command(struct scsi_cmnd *scmd)
                /*
                 * Retry after abort failed, escalate to next level.
                 */
+               scmd->eh_eflags &= ~SCSI_EH_ABORT_SCHEDULED;
                SCSI_LOG_ERROR_RECOVERY(3,
                        scmd_printk(KERN_INFO, scmd,
                                    "scmd %p previous abort failed\n", scmd));
@@ -920,10 +921,12 @@ void scsi_eh_prep_cmnd(struct scsi_cmnd *scmd, struct scsi_eh_save *ses,
        ses->prot_op = scmd->prot_op;
 
        scmd->prot_op = SCSI_PROT_NORMAL;
+       scmd->eh_eflags = 0;
        scmd->cmnd = ses->eh_cmnd;
        memset(scmd->cmnd, 0, BLK_MAX_CDB);
        memset(&scmd->sdb, 0, sizeof(scmd->sdb));
        scmd->request->next_rq = NULL;
+       scmd->result = 0;
 
        if (sense_bytes) {
                scmd->sdb.length = min_t(unsigned, SCSI_SENSE_BUFFERSIZE,
@@ -1157,6 +1160,15 @@ int scsi_eh_get_sense(struct list_head *work_q,
                                             __func__));
                        break;
                }
+               if (status_byte(scmd->result) != CHECK_CONDITION)
+                       /*
+                        * don't request sense if there's no check condition
+                        * status because the error we're processing isn't one
+                        * that has a sense code (and some devices get
+                        * confused by sense requests out of the blue)
+                        */
+                       continue;
+
                SCSI_LOG_ERROR_RECOVERY(2, scmd_printk(KERN_INFO, scmd,
                                                  "%s: requesting sense\n",
                                                  current->comm));
index 65a123d9c67649822e2ab0333bf534f820b8b212..9db097a28a74588c793c0521c7f80f8540820f61 100644 (file)
@@ -137,6 +137,7 @@ static void __scsi_queue_insert(struct scsi_cmnd *cmd, int reason, int unbusy)
         * lock such that the kblockd_schedule_work() call happens
         * before blk_cleanup_queue() finishes.
         */
+       cmd->result = 0;
        spin_lock_irqsave(q->queue_lock, flags);
        blk_requeue_request(q, cmd->request);
        kblockd_schedule_work(q, &device->requeue_work);
@@ -1044,6 +1045,7 @@ static int scsi_init_sgtable(struct request *req, struct scsi_data_buffer *sdb,
  */
 int scsi_init_io(struct scsi_cmnd *cmd, gfp_t gfp_mask)
 {
+       struct scsi_device *sdev = cmd->device;
        struct request *rq = cmd->request;
 
        int error = scsi_init_sgtable(rq, &cmd->sdb, gfp_mask);
@@ -1091,7 +1093,7 @@ err_exit:
        scsi_release_buffers(cmd);
        cmd->request->special = NULL;
        scsi_put_command(cmd);
-       put_device(&cmd->device->sdev_gendev);
+       put_device(&sdev->sdev_gendev);
        return error;
 }
 EXPORT_SYMBOL(scsi_init_io);
@@ -1273,7 +1275,7 @@ int scsi_prep_return(struct request_queue *q, struct request *req, int ret)
                        struct scsi_cmnd *cmd = req->special;
                        scsi_release_buffers(cmd);
                        scsi_put_command(cmd);
-                       put_device(&cmd->device->sdev_gendev);
+                       put_device(&sdev->sdev_gendev);
                        req->special = NULL;
                }
                break;
index fe30ea94ffe67ef4e5d355fdc9cdcb71eee9e0d7..109802f776ed71cea6857eda9ae6ccc3e0b41f80 100644 (file)
@@ -77,7 +77,7 @@ scsi_nl_rcv_msg(struct sk_buff *skb)
                        goto next_msg;
                }
 
-               if (!capable(CAP_SYS_ADMIN)) {
+               if (!netlink_capable(skb, CAP_SYS_ADMIN)) {
                        err = -EPERM;
                        goto next_msg;
                }
index 16bfd50cd3fe65644c5443698d3aa3e96dfd5925..db3b494e5926a423866e0ad3a18b15b6378d3cca 100644 (file)
@@ -750,8 +750,12 @@ static void __virtscsi_set_affinity(struct virtio_scsi *vscsi, bool affinity)
 
                vscsi->affinity_hint_set = true;
        } else {
-               for (i = 0; i < vscsi->num_queues; i++)
+               for (i = 0; i < vscsi->num_queues; i++) {
+                       if (!vscsi->req_vqs[i].vq)
+                               continue;
+
                        virtqueue_set_affinity(vscsi->req_vqs[i].vq, -1);
+               }
 
                vscsi->affinity_hint_set = false;
        }
index 8005f986948173e55d5bc5a5387da80c8a5b9185..079e6b1b0cdb6fbc2e05532f3d8548653f4d4694 100644 (file)
@@ -1115,8 +1115,11 @@ static int atmel_spi_one_transfer(struct spi_master *master,
                        atmel_spi_next_xfer_pio(master, xfer);
                }
 
+               /* interrupts are disabled, so free the lock for schedule */
+               atmel_spi_unlock(as);
                ret = wait_for_completion_timeout(&as->xfer_completion,
                                                        SPI_DMA_TIMEOUT);
+               atmel_spi_lock(as);
                if (WARN_ON(ret == 0)) {
                        dev_err(&spi->dev,
                                "spi trasfer timeout, err %d\n", ret);
index 55e57c3eb9bd051bc7fcca7d5090a2d999bf688b..ebf720b88a2a5ca5c47c389379c50ab18e4cbd2b 100644 (file)
@@ -12,6 +12,7 @@
 #include <linux/module.h>
 #include <linux/delay.h>
 #include <linux/device.h>
+#include <linux/gpio.h>
 #include <linux/slab.h>
 #include <linux/io.h>
 #include <linux/ioport.h>
index 9009456bdf4d29c6febf56b162113fa5e4bf78ef..c8e795ef2e132fcb10dd4ac196decbd887861f5d 100644 (file)
@@ -244,9 +244,9 @@ static int hspi_probe(struct platform_device *pdev)
                return -ENOMEM;
        }
 
-       clk = clk_get(NULL, "shyway_clk");
+       clk = clk_get(&pdev->dev, NULL);
        if (IS_ERR(clk)) {
-               dev_err(&pdev->dev, "shyway_clk is required\n");
+               dev_err(&pdev->dev, "couldn't get clock\n");
                ret = -EINVAL;
                goto error0;
        }
index 1a77ad52812fd79d3a2524264e7a2a8678ca1464..67d8909dcf3946a4d516d607fd83cef417aaabc1 100644 (file)
@@ -287,8 +287,8 @@ static irqreturn_t spi_sirfsoc_irq(int irq, void *dev_id)
                                sspi->left_rx_word)
                        sspi->rx_word(sspi);
 
-       if (spi_stat & (SIRFSOC_SPI_FIFO_EMPTY
-                       SIRFSOC_SPI_TXFIFO_THD_REACH))
+       if (spi_stat & (SIRFSOC_SPI_TXFIFO_EMPTY |
+                       SIRFSOC_SPI_TXFIFO_THD_REACH))
                while (!((readl(sspi->base + SIRFSOC_SPI_TXFIFO_STATUS)
                                & SIRFSOC_SPI_FIFO_FULL)) &&
                                sspi->left_tx_word)
@@ -470,7 +470,16 @@ static void spi_sirfsoc_chipselect(struct spi_device *spi, int value)
                writel(regval, sspi->base + SIRFSOC_SPI_CTRL);
        } else {
                int gpio = sspi->chipselect[spi->chip_select];
-               gpio_direction_output(gpio, spi->mode & SPI_CS_HIGH ? 0 : 1);
+               switch (value) {
+               case BITBANG_CS_ACTIVE:
+                       gpio_direction_output(gpio,
+                                       spi->mode & SPI_CS_HIGH ? 1 : 0);
+                       break;
+               case BITBANG_CS_INACTIVE:
+                       gpio_direction_output(gpio,
+                                       spi->mode & SPI_CS_HIGH ? 0 : 1);
+                       break;
+               }
        }
 }
 
@@ -559,6 +568,11 @@ spi_sirfsoc_setup_transfer(struct spi_device *spi, struct spi_transfer *t)
                regval &= ~SIRFSOC_SPI_CMD_MODE;
                sspi->tx_by_cmd = false;
        }
+       /*
+        * set spi controller in RISC chipselect mode, we are controlling CS by
+        * software BITBANG_CS_ACTIVE and BITBANG_CS_INACTIVE.
+        */
+       regval |= SIRFSOC_SPI_CS_IO_MODE;
        writel(regval, sspi->base + SIRFSOC_SPI_CTRL);
 
        if (IS_DMA_VALID(t)) {
index 71db683098d67399b3c6918256e977af92ab6e85..b59af030358190a9539b16c7fd8d8b20fb16c30a 100644 (file)
@@ -493,7 +493,7 @@ static void usbduxsub_ao_isoc_irq(struct urb *urb)
                        /* pointer to the DA */
                        *datap++ = val & 0xff;
                        *datap++ = (val >> 8) & 0xff;
-                       *datap++ = chan;
+                       *datap++ = chan << 6;
                        devpriv->ao_readback[chan] = val;
 
                        s->async->events |= COMEDI_CB_BLOCK;
@@ -1040,11 +1040,8 @@ static int usbdux_ao_cmd(struct comedi_device *dev, struct comedi_subdevice *s)
        /* set current channel of the running acquisition to zero */
        s->async->cur_chan = 0;
 
-       for (i = 0; i < cmd->chanlist_len; ++i) {
-               unsigned int chan = CR_CHAN(cmd->chanlist[i]);
-
-               devpriv->ao_chanlist[i] = chan << 6;
-       }
+       for (i = 0; i < cmd->chanlist_len; ++i)
+               devpriv->ao_chanlist[i] = CR_CHAN(cmd->chanlist[i]);
 
        /* we count in steps of 1ms (125us) */
        /* 125us mode not used yet */
index d329cf31436048598f6f0221de7ad4b586c58861..15e0f4da3ce07ba4ea597b021735e025fe83e0fa 100644 (file)
@@ -4604,7 +4604,7 @@ static int et131x_pci_setup(struct pci_dev *pdev,
        netdev->netdev_ops     = &et131x_netdev_ops;
 
        SET_NETDEV_DEV(netdev, &pdev->dev);
-       SET_ETHTOOL_OPS(netdev, &et131x_ethtool_ops);
+       netdev->ethtool_ops = &et131x_ethtool_ops;
 
        adapter = et131x_adapter_init(netdev, pdev);
 
index d6421b9b5981c2265a1c796a03ade4ec5416e878..a6158bef58e54c63dc19ebfcb3ef89daf2b0874e 100644 (file)
@@ -2249,7 +2249,7 @@ struct net_device *init_ft1000_card(struct pcmcia_device *link,
 
        ft1000InitProc(dev);
        ft1000_card_present = 1;
-       SET_ETHTOOL_OPS(dev, &ops);
+       dev->ethtool_ops = &ops;
        printk(KERN_INFO "ft1000: %s: addr 0x%04lx irq %d, MAC addr %pM\n",
                        dev->name, dev->base_addr, dev->irq, dev->dev_addr);
        return dev;
index 11fb95201545233921f67d912051b27119a1648a..dae8d1a9038e661885e2c3bf51869ba76121d122 100644 (file)
@@ -1526,7 +1526,7 @@ static int mxs_lradc_probe(struct platform_device *pdev)
        struct resource *iores;
        int ret = 0, touch_ret;
        int i, s;
-       unsigned int scale_uv;
+       uint64_t scale_uv;
 
        /* Allocate the IIO device. */
        iio = devm_iio_device_alloc(dev, sizeof(*lradc));
index 36eedd8a0ea9815c168889d3ed614597a93847a6..017d2f8379b78ca86f7e30ed0e85855684c7b6ec 100644 (file)
@@ -70,6 +70,7 @@ static int ad2s1200_read_raw(struct iio_dev *indio_dev,
                vel = (((s16)(st->rx[0])) << 4) | ((st->rx[1] & 0xF0) >> 4);
                vel = (vel << 4) >> 4;
                *val = vel;
+               break;
        default:
                mutex_unlock(&st->lock);
                return -EINVAL;
@@ -106,7 +107,7 @@ static int ad2s1200_probe(struct spi_device *spi)
        int pn, ret = 0;
        unsigned short *pins = spi->dev.platform_data;
 
-       for (pn = 0; pn < AD2S1200_PN; pn++)
+       for (pn = 0; pn < AD2S1200_PN; pn++) {
                ret = devm_gpio_request_one(&spi->dev, pins[pn], GPIOF_DIR_OUT,
                                            DRV_NAME);
                if (ret) {
@@ -114,6 +115,7 @@ static int ad2s1200_probe(struct spi_device *spi)
                                                        pins[pn]);
                        return ret;
                }
+       }
        indio_dev = devm_iio_device_alloc(&spi->dev, sizeof(*st));
        if (!indio_dev)
                return -ENOMEM;
index c83e3375104b5066db2a4991726fbb8a3aa45fde..9d957615e32addcd405ebdfe4ad505b99a42fdc9 100644 (file)
@@ -1066,7 +1066,7 @@ static int xlr_net_probe(struct platform_device *pdev)
        xlr_set_rx_mode(ndev);
 
        priv->num_rx_desc += MAX_NUM_DESC_SPILL;
-       SET_ETHTOOL_OPS(ndev, &xlr_ethtool_ops);
+       ndev->ethtool_ops = &xlr_ethtool_ops;
        SET_NETDEV_DEV(ndev, &pdev->dev);
 
        /* Common registers, do one time initialization */
index ff7214aac9dd6ed05c524c69fa23e35767d2dcce..da9dd6bc56600f2fe094df6a59d3836ad491be72 100644 (file)
@@ -469,7 +469,7 @@ int cvm_oct_common_init(struct net_device *dev)
 
        /* We do our own locking, Linux doesn't need to */
        dev->features |= NETIF_F_LLTX;
-       SET_ETHTOOL_OPS(dev, &cvm_oct_ethtool_ops);
+       dev->ethtool_ops = &cvm_oct_ethtool_ops;
 
        cvm_oct_phy_setup_device(dev);
        cvm_oct_set_mac_filter(dev);
index ff3139b6da656f2f985b2c1498fa16508c2030b2..63ae2d1997d3c19fb1bd38c97ac4d39bd1c7cf52 100644 (file)
@@ -1414,23 +1414,15 @@ static void rtl_op_rfkill_poll(struct ieee80211_hw *hw)
  * before switch channel or power save, or tx buffer packet
  * maybe send after offchannel or rf sleep, this may cause
  * dis-association by AP */
-#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3,10,0))
-static void rtl_op_flush(struct ieee80211_hw *hw, u32 queues, bool drop)
+static void rtl_op_flush(struct ieee80211_hw *hw,
+                        struct ieee80211_vif *vif,
+                        u32 queues, bool drop)
 {
        struct rtl_priv *rtlpriv = rtl_priv(hw);
 
        if (rtlpriv->intf_ops->flush)
                rtlpriv->intf_ops->flush(hw, queues, drop);
 }
-#else
-static void rtl_op_flush(struct ieee80211_hw *hw, bool drop)
-{
-       struct rtl_priv *rtlpriv = rtl_priv(hw);
-
-       if (rtlpriv->intf_ops->flush)
-               rtlpriv->intf_ops->flush(hw, drop);
-}
-#endif
 
 const struct ieee80211_ops rtl_ops = {
        .start = rtl_op_start,
index 94f9e3a38412f3071d63b4964925b375abec4283..0ff7fda0742f4326113cf82f8a72cb355c840319 100644 (file)
@@ -190,7 +190,7 @@ static struct tty_driver *hvc_console_device(struct console *c, int *index)
        return hvc_driver;
 }
 
-static int __init hvc_console_setup(struct console *co, char *options)
+static int hvc_console_setup(struct console *co, char *options)
 {      
        if (co->index < 0 || co->index >= MAX_NR_HVC_CONSOLES)
                return -ENODEV;
index 41fe8a047d373cf84b14a9a2f5d8f41e07fd3b5f..fe9d129c87351b47392320a626dedb89e2f0bf55 100644 (file)
@@ -2353,8 +2353,12 @@ static ssize_t n_tty_write(struct tty_struct *tty, struct file *file,
                        if (tty->ops->flush_chars)
                                tty->ops->flush_chars(tty);
                } else {
+                       struct n_tty_data *ldata = tty->disc_data;
+
                        while (nr > 0) {
+                               mutex_lock(&ldata->output_lock);
                                c = tty->ops->write(tty, b, nr);
+                               mutex_unlock(&ldata->output_lock);
                                if (c < 0) {
                                        retval = c;
                                        goto break_out;
index 81f909c2101f6145f568fd048b60453533cf943d..2d4bd3929e507376f7d4b25f788fbba3b61af1a4 100644 (file)
@@ -555,7 +555,7 @@ static void serial8250_set_sleep(struct uart_8250_port *p, int sleep)
         */
        if ((p->port.type == PORT_XR17V35X) ||
           (p->port.type == PORT_XR17D15X)) {
-               serial_out(p, UART_EXAR_SLEEP, 0xff);
+               serial_out(p, UART_EXAR_SLEEP, sleep ? 0xff : 0);
                return;
        }
 
@@ -1520,7 +1520,7 @@ int serial8250_handle_irq(struct uart_port *port, unsigned int iir)
                        status = serial8250_rx_chars(up, status);
        }
        serial8250_modem_status(up);
-       if (status & UART_LSR_THRE)
+       if (!up->dma && (status & UART_LSR_THRE))
                serial8250_tx_chars(up);
 
        spin_unlock_irqrestore(&port->lock, flags);
index 7046769608d403501158a044fd109203f0e0531f..ab9096dc384976de15c41f0d5f39b6975b72a45d 100644 (file)
@@ -20,12 +20,15 @@ static void __dma_tx_complete(void *param)
        struct uart_8250_port   *p = param;
        struct uart_8250_dma    *dma = p->dma;
        struct circ_buf         *xmit = &p->port.state->xmit;
-
-       dma->tx_running = 0;
+       unsigned long   flags;
 
        dma_sync_single_for_cpu(dma->txchan->device->dev, dma->tx_addr,
                                UART_XMIT_SIZE, DMA_TO_DEVICE);
 
+       spin_lock_irqsave(&p->port.lock, flags);
+
+       dma->tx_running = 0;
+
        xmit->tail += dma->tx_size;
        xmit->tail &= UART_XMIT_SIZE - 1;
        p->port.icount.tx += dma->tx_size;
@@ -35,6 +38,8 @@ static void __dma_tx_complete(void *param)
 
        if (!uart_circ_empty(xmit) && !uart_tx_stopped(&p->port))
                serial8250_tx_dma(p);
+
+       spin_unlock_irqrestore(&p->port.lock, flags);
 }
 
 static void __dma_rx_complete(void *param)
index 23f4596007382501589476b885d269bcce1484a6..1f5505e7f90dd9de0c84770a164c3a1d95968dc4 100644 (file)
@@ -1446,8 +1446,8 @@ static int s3c24xx_serial_get_poll_char(struct uart_port *port)
 static void s3c24xx_serial_put_poll_char(struct uart_port *port,
                unsigned char c)
 {
-       unsigned int ufcon = rd_regl(cons_uart, S3C2410_UFCON);
-       unsigned int ucon = rd_regl(cons_uart, S3C2410_UCON);
+       unsigned int ufcon = rd_regl(port, S3C2410_UFCON);
+       unsigned int ucon = rd_regl(port, S3C2410_UCON);
 
        /* not possible to xmit on unconfigured port */
        if (!s3c24xx_port_configured(ucon))
@@ -1455,7 +1455,7 @@ static void s3c24xx_serial_put_poll_char(struct uart_port *port,
 
        while (!s3c24xx_serial_console_txrdy(port, ufcon))
                cpu_relax();
-       wr_regb(cons_uart, S3C2410_UTXH, c);
+       wr_regb(port, S3C2410_UTXH, c);
 }
 
 #endif /* CONFIG_CONSOLE_POLL */
@@ -1463,22 +1463,23 @@ static void s3c24xx_serial_put_poll_char(struct uart_port *port,
 static void
 s3c24xx_serial_console_putchar(struct uart_port *port, int ch)
 {
-       unsigned int ufcon = rd_regl(cons_uart, S3C2410_UFCON);
-       unsigned int ucon = rd_regl(cons_uart, S3C2410_UCON);
-
-       /* not possible to xmit on unconfigured port */
-       if (!s3c24xx_port_configured(ucon))
-               return;
+       unsigned int ufcon = rd_regl(port, S3C2410_UFCON);
 
        while (!s3c24xx_serial_console_txrdy(port, ufcon))
-               barrier();
-       wr_regb(cons_uart, S3C2410_UTXH, ch);
+               cpu_relax();
+       wr_regb(port, S3C2410_UTXH, ch);
 }
 
 static void
 s3c24xx_serial_console_write(struct console *co, const char *s,
                             unsigned int count)
 {
+       unsigned int ucon = rd_regl(cons_uart, S3C2410_UCON);
+
+       /* not possible to xmit on unconfigured port */
+       if (!s3c24xx_port_configured(ucon))
+               return;
+
        uart_console_write(cons_uart, s, count, s3c24xx_serial_console_putchar);
 }
 
index f26834d262b3a8d540a96455480fa4d5b6a1a81e..b68550d95a403dbc6d5ecb0faa4a5bb4d38cd86f 100644 (file)
@@ -136,6 +136,11 @@ static int uart_port_startup(struct tty_struct *tty, struct uart_state *state,
        if (uport->type == PORT_UNKNOWN)
                return 1;
 
+       /*
+        * Make sure the device is in D0 state.
+        */
+       uart_change_pm(state, UART_PM_STATE_ON);
+
        /*
         * Initialise and allocate the transmit and temporary
         * buffer.
@@ -825,25 +830,29 @@ static int uart_set_info(struct tty_struct *tty, struct tty_port *port,
                 * If we fail to request resources for the
                 * new port, try to restore the old settings.
                 */
-               if (retval && old_type != PORT_UNKNOWN) {
+               if (retval) {
                        uport->iobase = old_iobase;
                        uport->type = old_type;
                        uport->hub6 = old_hub6;
                        uport->iotype = old_iotype;
                        uport->regshift = old_shift;
                        uport->mapbase = old_mapbase;
-                       retval = uport->ops->request_port(uport);
-                       /*
-                        * If we failed to restore the old settings,
-                        * we fail like this.
-                        */
-                       if (retval)
-                               uport->type = PORT_UNKNOWN;
 
-                       /*
-                        * We failed anyway.
-                        */
-                       retval = -EBUSY;
+                       if (old_type != PORT_UNKNOWN) {
+                               retval = uport->ops->request_port(uport);
+                               /*
+                                * If we failed to restore the old settings,
+                                * we fail like this.
+                                */
+                               if (retval)
+                                       uport->type = PORT_UNKNOWN;
+
+                               /*
+                                * We failed anyway.
+                                */
+                               retval = -EBUSY;
+                       }
+
                        /* Added to return the correct error -Ram Gupta */
                        goto exit;
                }
@@ -1570,12 +1579,6 @@ static int uart_open(struct tty_struct *tty, struct file *filp)
                goto err_dec_count;
        }
 
-       /*
-        * Make sure the device is in D0 state.
-        */
-       if (port->count == 1)
-               uart_change_pm(state, UART_PM_STATE_ON);
-
        /*
         * Start up the serial port.
         */
index 8ebd9f88a6f69ff85f63139944fad2542c789483..cf78d1985cd851fb2b6615054bfabf5a8e3b2b13 100644 (file)
@@ -258,7 +258,11 @@ static int __tty_buffer_request_room(struct tty_port *port, size_t size,
                        n->flags = flags;
                        buf->tail = n;
                        b->commit = b->used;
-                       smp_mb();
+                       /* paired w/ barrier in flush_to_ldisc(); ensures the
+                        * latest commit value can be read before the head is
+                        * advanced to the next buffer
+                        */
+                       smp_wmb();
                        b->next = n;
                } else if (change)
                        size = 0;
@@ -444,17 +448,24 @@ static void flush_to_ldisc(struct work_struct *work)
 
        while (1) {
                struct tty_buffer *head = buf->head;
+               struct tty_buffer *next;
                int count;
 
                /* Ldisc or user is trying to gain exclusive access */
                if (atomic_read(&buf->priority))
                        break;
 
+               next = head->next;
+               /* paired w/ barrier in __tty_buffer_request_room();
+                * ensures commit value read is not stale if the head
+                * is advancing to the next buffer
+                */
+               smp_rmb();
                count = head->commit - head->read;
                if (!count) {
-                       if (head->next == NULL)
+                       if (next == NULL)
                                break;
-                       buf->head = head->next;
+                       buf->head = next;
                        tty_buffer_free(port, head);
                        continue;
                }
index ca6831c5b763053d146d8c7bbae2f10d85e3c3d1..1cd5d0ba587c8ebaf49c876a086570be4cabd8c9 100644 (file)
@@ -276,6 +276,39 @@ static void hw_phymode_configure(struct ci_hdrc *ci)
        }
 }
 
+/**
+ * ci_usb_phy_init: initialize phy according to different phy type
+ * @ci: the controller
+  *
+ * This function returns an error code if usb_phy_init has failed
+ */
+static int ci_usb_phy_init(struct ci_hdrc *ci)
+{
+       int ret;
+
+       switch (ci->platdata->phy_mode) {
+       case USBPHY_INTERFACE_MODE_UTMI:
+       case USBPHY_INTERFACE_MODE_UTMIW:
+       case USBPHY_INTERFACE_MODE_HSIC:
+               ret = usb_phy_init(ci->transceiver);
+               if (ret)
+                       return ret;
+               hw_phymode_configure(ci);
+               break;
+       case USBPHY_INTERFACE_MODE_ULPI:
+       case USBPHY_INTERFACE_MODE_SERIAL:
+               hw_phymode_configure(ci);
+               ret = usb_phy_init(ci->transceiver);
+               if (ret)
+                       return ret;
+               break;
+       default:
+               ret = usb_phy_init(ci->transceiver);
+       }
+
+       return ret;
+}
+
 /**
  * hw_device_reset: resets chip (execute without interruption)
  * @ci: the controller
@@ -543,8 +576,6 @@ static int ci_hdrc_probe(struct platform_device *pdev)
                return -ENODEV;
        }
 
-       hw_phymode_configure(ci);
-
        if (ci->platdata->phy)
                ci->transceiver = ci->platdata->phy;
        else
@@ -564,7 +595,7 @@ static int ci_hdrc_probe(struct platform_device *pdev)
                return -EPROBE_DEFER;
        }
 
-       ret = usb_phy_init(ci->transceiver);
+       ret = ci_usb_phy_init(ci);
        if (ret) {
                dev_err(dev, "unable to init phy: %d\n", ret);
                return ret;
index d001417e8e370cd7f3e7fa2d9b9c2678271987dc..10aaaae9af25e8f6b797f063b5e5ba64ec37c847 100644 (file)
@@ -821,6 +821,7 @@ static void dwc3_complete(struct device *dev)
 
        spin_lock_irqsave(&dwc->lock, flags);
 
+       dwc3_event_buffers_setup(dwc);
        switch (dwc->dr_mode) {
        case USB_DR_MODE_PERIPHERAL:
        case USB_DR_MODE_OTG:
@@ -828,7 +829,6 @@ static void dwc3_complete(struct device *dev)
                /* FALLTHROUGH */
        case USB_DR_MODE_HOST:
        default:
-               dwc3_event_buffers_setup(dwc);
                break;
        }
 
index a740eac74d56d502675107f03ffbd324e85f1f63..70715eeededda3b4acfab6bff72f13531338a8b2 100644 (file)
@@ -187,15 +187,12 @@ int dwc3_gadget_resize_tx_fifos(struct dwc3 *dwc)
         * improve this algorithm so that we better use the internal
         * FIFO space
         */
-       for (num = 0; num < DWC3_ENDPOINTS_NUM; num++) {
-               struct dwc3_ep  *dep = dwc->eps[num];
-               int             fifo_number = dep->number >> 1;
+       for (num = 0; num < dwc->num_in_eps; num++) {
+               /* bit0 indicates direction; 1 means IN ep */
+               struct dwc3_ep  *dep = dwc->eps[(num << 1) | 1];
                int             mult = 1;
                int             tmp;
 
-               if (!(dep->number & 1))
-                       continue;
-
                if (!(dep->flags & DWC3_EP_ENABLED))
                        continue;
 
@@ -224,8 +221,7 @@ int dwc3_gadget_resize_tx_fifos(struct dwc3 *dwc)
                dev_vdbg(dwc->dev, "%s: Fifo Addr %04x Size %d\n",
                                dep->name, last_fifo_depth, fifo_size & 0xffff);
 
-               dwc3_writel(dwc->regs, DWC3_GTXFIFOSIZ(fifo_number),
-                               fifo_size);
+               dwc3_writel(dwc->regs, DWC3_GTXFIFOSIZ(num), fifo_size);
 
                last_fifo_depth += (fifo_size & 0xffff);
        }
index f605ad8c1902fc775cffc39dcd53be8d65fe3a8f..cfd18bcca723ef700d727fa1f8f8a39405eba13f 100644 (file)
@@ -1709,16 +1709,6 @@ static int at91udc_probe(struct platform_device *pdev)
                return -ENODEV;
        }
 
-       if (pdev->num_resources != 2) {
-               DBG("invalid num_resources\n");
-               return -ENODEV;
-       }
-       if ((pdev->resource[0].flags != IORESOURCE_MEM)
-                       || (pdev->resource[1].flags != IORESOURCE_IRQ)) {
-               DBG("invalid resource type\n");
-               return -ENODEV;
-       }
-
        res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
        if (!res)
                return -ENXIO;
index 2e164dca08e89fc29ea1887f0afe7b2a09e1a5af..1e12b3ee56fd837117a03266cf66205bb418b366 100644 (file)
@@ -745,6 +745,12 @@ static ssize_t ffs_epfile_io(struct file *file, struct ffs_io_data *io_data)
                 */
                struct usb_gadget *gadget = epfile->ffs->gadget;
 
+               spin_lock_irq(&epfile->ffs->eps_lock);
+               /* In the meantime, endpoint got disabled or changed. */
+               if (epfile->ep != ep) {
+                       spin_unlock_irq(&epfile->ffs->eps_lock);
+                       return -ESHUTDOWN;
+               }
                /*
                 * Controller may require buffer size to be aligned to
                 * maxpacketsize of an out endpoint.
@@ -752,6 +758,7 @@ static ssize_t ffs_epfile_io(struct file *file, struct ffs_io_data *io_data)
                data_len = io_data->read ?
                           usb_ep_align_maybe(gadget, ep->ep, io_data->len) :
                           io_data->len;
+               spin_unlock_irq(&epfile->ffs->eps_lock);
 
                data = kmalloc(data_len, GFP_KERNEL);
                if (unlikely(!data))
index c11761ce511302fbc386791937a4fecf8a98cfa0..9a4f49dc6ac4f879cfac9496c49951b21c18fe71 100644 (file)
@@ -377,7 +377,7 @@ static struct sk_buff *rndis_add_header(struct gether *port,
        if (skb2)
                rndis_add_hdr(skb2);
 
-       dev_kfree_skb_any(skb);
+       dev_kfree_skb(skb);
        return skb2;
 }
 
index 15960af0f67ea7805e22e9fea6671c2635e16153..a2f26cdb56fef07a0a15e8bb329c089413efab6a 100644 (file)
@@ -1219,6 +1219,10 @@ static int fsl_pullup(struct usb_gadget *gadget, int is_on)
        struct fsl_udc *udc;
 
        udc = container_of(gadget, struct fsl_udc, gadget);
+
+       if (!udc->vbus_active)
+               return -EOPNOTSUPP;
+
        udc->softconnect = (is_on != 0);
        if (can_pullup(udc))
                fsl_writel((fsl_readl(&dr_regs->usbcmd) | USB_CMD_RUN_STOP),
@@ -2532,8 +2536,8 @@ static int __exit fsl_udc_remove(struct platform_device *pdev)
        if (!udc_controller)
                return -ENODEV;
 
-       usb_del_gadget_udc(&udc_controller->gadget);
        udc_controller->done = &done;
+       usb_del_gadget_udc(&udc_controller->gadget);
 
        fsl_udc_clk_release();
 
index b5be6f0308c270f2a844db3f00e7884380e5c2e6..a925d0cbcd4199d777071408f8e335119932b3ab 100644 (file)
@@ -2043,6 +2043,7 @@ gadgetfs_fill_super (struct super_block *sb, void *opts, int silent)
                return -ESRCH;
 
        /* fake probe to determine $CHIP */
+       CHIP = NULL;
        usb_gadget_probe_driver(&probe_driver);
        if (!CHIP)
                return -ENODEV;
index d822d822efb34d38b1621d4d63478c82cd6ec9e3..7ed452d90f4d76c9d48c2bb3c2f55a8d40a0f5e9 100644 (file)
@@ -35,6 +35,7 @@
 #include <asm/byteorder.h>
 #include <asm/unaligned.h>
 
+#include "u_rndis.h"
 
 #undef VERBOSE_DEBUG
 
index 50d09c289137024271a480acab22ff1d8443d96e..ce8e28146162723a1b9b2f0bf7db34f66065d09b 100644 (file)
@@ -48,8 +48,6 @@
 
 #define UETH__VERSION  "29-May-2008"
 
-#define GETHER_NAPI_WEIGHT     32
-
 struct eth_dev {
        /* lock is held while accessing port_usb
         */
@@ -74,7 +72,6 @@ struct eth_dev {
                                                struct sk_buff_head *list);
 
        struct work_struct      work;
-       struct napi_struct      rx_napi;
 
        unsigned long           todo;
 #define        WORK_RX_MEMORY          0
@@ -256,16 +253,18 @@ enomem:
                DBG(dev, "rx submit --> %d\n", retval);
                if (skb)
                        dev_kfree_skb_any(skb);
+               spin_lock_irqsave(&dev->req_lock, flags);
+               list_add(&req->list, &dev->rx_reqs);
+               spin_unlock_irqrestore(&dev->req_lock, flags);
        }
        return retval;
 }
 
 static void rx_complete(struct usb_ep *ep, struct usb_request *req)
 {
-       struct sk_buff  *skb = req->context;
+       struct sk_buff  *skb = req->context, *skb2;
        struct eth_dev  *dev = ep->driver_data;
        int             status = req->status;
-       bool            rx_queue = 0;
 
        switch (status) {
 
@@ -289,8 +288,30 @@ static void rx_complete(struct usb_ep *ep, struct usb_request *req)
                } else {
                        skb_queue_tail(&dev->rx_frames, skb);
                }
-               if (!status)
-                       rx_queue = 1;
+               skb = NULL;
+
+               skb2 = skb_dequeue(&dev->rx_frames);
+               while (skb2) {
+                       if (status < 0
+                                       || ETH_HLEN > skb2->len
+                                       || skb2->len > VLAN_ETH_FRAME_LEN) {
+                               dev->net->stats.rx_errors++;
+                               dev->net->stats.rx_length_errors++;
+                               DBG(dev, "rx length %d\n", skb2->len);
+                               dev_kfree_skb_any(skb2);
+                               goto next_frame;
+                       }
+                       skb2->protocol = eth_type_trans(skb2, dev->net);
+                       dev->net->stats.rx_packets++;
+                       dev->net->stats.rx_bytes += skb2->len;
+
+                       /* no buffer copies needed, unless hardware can't
+                        * use skb buffers.
+                        */
+                       status = netif_rx(skb2);
+next_frame:
+                       skb2 = skb_dequeue(&dev->rx_frames);
+               }
                break;
 
        /* software-driven interface shutdown */
@@ -313,20 +334,22 @@ quiesce:
                /* FALLTHROUGH */
 
        default:
-               rx_queue = 1;
-               dev_kfree_skb_any(skb);
                dev->net->stats.rx_errors++;
                DBG(dev, "rx status %d\n", status);
                break;
        }
 
+       if (skb)
+               dev_kfree_skb_any(skb);
+       if (!netif_running(dev->net)) {
 clean:
                spin_lock(&dev->req_lock);
                list_add(&req->list, &dev->rx_reqs);
                spin_unlock(&dev->req_lock);
-
-       if (rx_queue && likely(napi_schedule_prep(&dev->rx_napi)))
-               __napi_schedule(&dev->rx_napi);
+               req = NULL;
+       }
+       if (req)
+               rx_submit(dev, req, GFP_ATOMIC);
 }
 
 static int prealloc(struct list_head *list, struct usb_ep *ep, unsigned n)
@@ -391,24 +414,16 @@ static void rx_fill(struct eth_dev *dev, gfp_t gfp_flags)
 {
        struct usb_request      *req;
        unsigned long           flags;
-       int                     rx_counts = 0;
 
        /* fill unused rxq slots with some skb */
        spin_lock_irqsave(&dev->req_lock, flags);
        while (!list_empty(&dev->rx_reqs)) {
-
-               if (++rx_counts > qlen(dev->gadget, dev->qmult))
-                       break;
-
                req = container_of(dev->rx_reqs.next,
                                struct usb_request, list);
                list_del_init(&req->list);
                spin_unlock_irqrestore(&dev->req_lock, flags);
 
                if (rx_submit(dev, req, gfp_flags) < 0) {
-                       spin_lock_irqsave(&dev->req_lock, flags);
-                       list_add(&req->list, &dev->rx_reqs);
-                       spin_unlock_irqrestore(&dev->req_lock, flags);
                        defer_kevent(dev, WORK_RX_MEMORY);
                        return;
                }
@@ -418,41 +433,6 @@ static void rx_fill(struct eth_dev *dev, gfp_t gfp_flags)
        spin_unlock_irqrestore(&dev->req_lock, flags);
 }
 
-static int gether_poll(struct napi_struct *napi, int budget)
-{
-       struct eth_dev  *dev = container_of(napi, struct eth_dev, rx_napi);
-       struct sk_buff  *skb;
-       unsigned int    work_done = 0;
-       int             status = 0;
-
-       while ((skb = skb_dequeue(&dev->rx_frames))) {
-               if (status < 0
-                               || ETH_HLEN > skb->len
-                               || skb->len > VLAN_ETH_FRAME_LEN) {
-                       dev->net->stats.rx_errors++;
-                       dev->net->stats.rx_length_errors++;
-                       DBG(dev, "rx length %d\n", skb->len);
-                       dev_kfree_skb_any(skb);
-                       continue;
-               }
-               skb->protocol = eth_type_trans(skb, dev->net);
-               dev->net->stats.rx_packets++;
-               dev->net->stats.rx_bytes += skb->len;
-
-               status = netif_rx_ni(skb);
-       }
-
-       if (netif_running(dev->net)) {
-               rx_fill(dev, GFP_KERNEL);
-               work_done++;
-       }
-
-       if (work_done < budget)
-               napi_complete(&dev->rx_napi);
-
-       return work_done;
-}
-
 static void eth_work(struct work_struct *work)
 {
        struct eth_dev  *dev = container_of(work, struct eth_dev, work);
@@ -645,7 +625,6 @@ static void eth_start(struct eth_dev *dev, gfp_t gfp_flags)
        /* and open the tx floodgates */
        atomic_set(&dev->tx_qlen, 0);
        netif_wake_queue(dev->net);
-       napi_enable(&dev->rx_napi);
 }
 
 static int eth_open(struct net_device *net)
@@ -672,7 +651,6 @@ static int eth_stop(struct net_device *net)
        unsigned long   flags;
 
        VDBG(dev, "%s\n", __func__);
-       napi_disable(&dev->rx_napi);
        netif_stop_queue(net);
 
        DBG(dev, "stop stats: rx/tx %ld/%ld, errs %ld/%ld\n",
@@ -790,7 +768,6 @@ struct eth_dev *gether_setup_name(struct usb_gadget *g,
                return ERR_PTR(-ENOMEM);
 
        dev = netdev_priv(net);
-       netif_napi_add(net, &dev->rx_napi, gether_poll, GETHER_NAPI_WEIGHT);
        spin_lock_init(&dev->lock);
        spin_lock_init(&dev->req_lock);
        INIT_WORK(&dev->work, eth_work);
@@ -816,7 +793,7 @@ struct eth_dev *gether_setup_name(struct usb_gadget *g,
 
        net->netdev_ops = &eth_netdev_ops;
 
-       SET_ETHTOOL_OPS(net, &ops);
+       net->ethtool_ops = &ops;
 
        dev->gadget = g;
        SET_NETDEV_DEV(net, &g->dev);
@@ -853,7 +830,6 @@ struct net_device *gether_setup_name_default(const char *netname)
                return ERR_PTR(-ENOMEM);
 
        dev = netdev_priv(net);
-       netif_napi_add(net, &dev->rx_napi, gether_poll, GETHER_NAPI_WEIGHT);
        spin_lock_init(&dev->lock);
        spin_lock_init(&dev->req_lock);
        INIT_WORK(&dev->work, eth_work);
@@ -874,7 +850,7 @@ struct net_device *gether_setup_name_default(const char *netname)
 
        net->netdev_ops = &eth_netdev_ops;
 
-       SET_ETHTOOL_OPS(net, &ops);
+       net->ethtool_ops = &ops;
        SET_NETDEV_DEVTYPE(net, &gadget_type);
 
        return net;
@@ -1137,7 +1113,6 @@ void gether_disconnect(struct gether *link)
 {
        struct eth_dev          *dev = link->ioport;
        struct usb_request      *req;
-       struct sk_buff          *skb;
 
        WARN_ON(!dev);
        if (!dev)
@@ -1164,12 +1139,6 @@ void gether_disconnect(struct gether *link)
                spin_lock(&dev->req_lock);
        }
        spin_unlock(&dev->req_lock);
-
-       spin_lock(&dev->rx_frames.lock);
-       while ((skb = __skb_dequeue(&dev->rx_frames)))
-               dev_kfree_skb_any(skb);
-       spin_unlock(&dev->rx_frames.lock);
-
        link->in_ep->driver_data = NULL;
        link->in_ep->desc = NULL;
 
index 9f170c53e3d9a96d97c6978502ba6f4240189438..134f354ede62e1b9f35ff189b0feda75aad4ac92 100644 (file)
@@ -300,7 +300,7 @@ static int __init zero_bind(struct usb_composite_dev *cdev)
        ss_opts->isoc_interval = gzero_options.isoc_interval;
        ss_opts->isoc_maxpacket = gzero_options.isoc_maxpacket;
        ss_opts->isoc_mult = gzero_options.isoc_mult;
-       ss_opts->isoc_maxburst = gzero_options.isoc_maxpacket;
+       ss_opts->isoc_maxburst = gzero_options.isoc_maxburst;
        ss_opts->bulk_buflen = gzero_options.bulk_buflen;
 
        func_ss = usb_get_function(func_inst_ss);
index 6f2c8d3899d2cfb00f14fe641341f68944943213..cf2734b532a7ab288d24dd13fd79db364490a748 100644 (file)
@@ -248,7 +248,8 @@ static int ehci_fsl_setup_phy(struct usb_hcd *hcd,
                break;
        }
 
-       if (pdata->have_sysif_regs && pdata->controller_ver &&
+       if (pdata->have_sysif_regs &&
+           pdata->controller_ver > FSL_USB_VER_1_6 &&
            (phy_mode == FSL_USB2_PHY_ULPI)) {
                /* check PHY_CLK_VALID to get phy clk valid */
                if (!(spin_event_timeout(in_be32(non_ehci + FSL_SOC_USB_CTRL) &
index c81c8721cc5a9e3d07e20c29d9d40e540136409e..cd871b89501325407af3ab3ebf0eb56ab0814a79 100644 (file)
@@ -90,6 +90,24 @@ __acquires(ohci->lock)
        dl_done_list (ohci);
        finish_unlinks (ohci, ohci_frame_no(ohci));
 
+       /*
+        * Some controllers don't handle "global" suspend properly if
+        * there are unsuspended ports.  For these controllers, put all
+        * the enabled ports into suspend before suspending the root hub.
+        */
+       if (ohci->flags & OHCI_QUIRK_GLOBAL_SUSPEND) {
+               __hc32 __iomem  *portstat = ohci->regs->roothub.portstatus;
+               int             i;
+               unsigned        temp;
+
+               for (i = 0; i < ohci->num_ports; (++i, ++portstat)) {
+                       temp = ohci_readl(ohci, portstat);
+                       if ((temp & (RH_PS_PES | RH_PS_PSS)) ==
+                                       RH_PS_PES)
+                               ohci_writel(ohci, RH_PS_PSS, portstat);
+               }
+       }
+
        /* maybe resume can wake root hub */
        if (ohci_to_hcd(ohci)->self.root_hub->do_remote_wakeup || autostop) {
                ohci->hc_control |= OHCI_CTRL_RWE;
index 90879e9ccbec302e8c5272d45e4847009b3730c4..bb1509675727b374586d61917920578cc7631a45 100644 (file)
@@ -160,6 +160,7 @@ static int ohci_quirk_amd700(struct usb_hcd *hcd)
                ohci_dbg(ohci, "enabled AMD prefetch quirk\n");
        }
 
+       ohci->flags |= OHCI_QUIRK_GLOBAL_SUSPEND;
        return 0;
 }
 
index 9250cada13f0b3e9a22711de3345b67de1627e01..4550ce05af7fa1d1b96c03dc2590c5c0cc943615 100644 (file)
@@ -405,6 +405,8 @@ struct ohci_hcd {
 #define        OHCI_QUIRK_HUB_POWER    0x100                   /* distrust firmware power/oc setup */
 #define        OHCI_QUIRK_AMD_PLL      0x200                   /* AMD PLL quirk*/
 #define        OHCI_QUIRK_AMD_PREFETCH 0x400                   /* pre-fetch for ISO transfer */
+#define        OHCI_QUIRK_GLOBAL_SUSPEND       0x800           /* must suspend ports */
+
        // there are also chip quirks/bugs in init logic
 
        struct work_struct      nec_work;       /* Worker for NEC quirk */
index 47390e369cd402f776c20bbb35b4c37223f04057..35d44778070786109262d96c95341758675a41e3 100644 (file)
@@ -134,6 +134,8 @@ static void xhci_pci_quirks(struct device *dev, struct xhci_hcd *xhci)
                 */
                if (pdev->subsystem_vendor == PCI_VENDOR_ID_HP)
                        xhci->quirks |= XHCI_SPURIOUS_WAKEUP;
+
+               xhci->quirks |= XHCI_SPURIOUS_REBOOT;
        }
        if (pdev->vendor == PCI_VENDOR_ID_ETRON &&
                        pdev->device == PCI_DEVICE_ID_ASROCK_P67) {
@@ -143,9 +145,7 @@ static void xhci_pci_quirks(struct device *dev, struct xhci_hcd *xhci)
                xhci->quirks |= XHCI_TRUST_TX_LENGTH;
        }
        if (pdev->vendor == PCI_VENDOR_ID_RENESAS &&
-                       pdev->device == 0x0015 &&
-                       pdev->subsystem_vendor == PCI_VENDOR_ID_SAMSUNG &&
-                       pdev->subsystem_device == 0xc0cd)
+                       pdev->device == 0x0015)
                xhci->quirks |= XHCI_RESET_ON_RESUME;
        if (pdev->vendor == PCI_VENDOR_ID_VIA)
                xhci->quirks |= XHCI_RESET_ON_RESUME;
index 5f926bea5ab1d87a54054ad68555ea31bd62cb63..7a0e3c720c005faed89e3caa55eeff73f1fd113c 100644 (file)
@@ -550,6 +550,7 @@ void xhci_find_new_dequeue_state(struct xhci_hcd *xhci,
        struct xhci_ring *ep_ring;
        struct xhci_generic_trb *trb;
        dma_addr_t addr;
+       u64 hw_dequeue;
 
        ep_ring = xhci_triad_to_transfer_ring(xhci, slot_id,
                        ep_index, stream_id);
@@ -559,16 +560,6 @@ void xhci_find_new_dequeue_state(struct xhci_hcd *xhci,
                                stream_id);
                return;
        }
-       state->new_cycle_state = 0;
-       xhci_dbg_trace(xhci, trace_xhci_dbg_cancel_urb,
-                       "Finding segment containing stopped TRB.");
-       state->new_deq_seg = find_trb_seg(cur_td->start_seg,
-                       dev->eps[ep_index].stopped_trb,
-                       &state->new_cycle_state);
-       if (!state->new_deq_seg) {
-               WARN_ON(1);
-               return;
-       }
 
        /* Dig out the cycle state saved by the xHC during the stop ep cmd */
        xhci_dbg_trace(xhci, trace_xhci_dbg_cancel_urb,
@@ -577,46 +568,57 @@ void xhci_find_new_dequeue_state(struct xhci_hcd *xhci,
        if (ep->ep_state & EP_HAS_STREAMS) {
                struct xhci_stream_ctx *ctx =
                        &ep->stream_info->stream_ctx_array[stream_id];
-               state->new_cycle_state = 0x1 & le64_to_cpu(ctx->stream_ring);
+               hw_dequeue = le64_to_cpu(ctx->stream_ring);
        } else {
                struct xhci_ep_ctx *ep_ctx
                        = xhci_get_ep_ctx(xhci, dev->out_ctx, ep_index);
-               state->new_cycle_state = 0x1 & le64_to_cpu(ep_ctx->deq);
+               hw_dequeue = le64_to_cpu(ep_ctx->deq);
        }
 
+       /* Find virtual address and segment of hardware dequeue pointer */
+       state->new_deq_seg = ep_ring->deq_seg;
+       state->new_deq_ptr = ep_ring->dequeue;
+       while (xhci_trb_virt_to_dma(state->new_deq_seg, state->new_deq_ptr)
+                       != (dma_addr_t)(hw_dequeue & ~0xf)) {
+               next_trb(xhci, ep_ring, &state->new_deq_seg,
+                                       &state->new_deq_ptr);
+               if (state->new_deq_ptr == ep_ring->dequeue) {
+                       WARN_ON(1);
+                       return;
+               }
+       }
+       /*
+        * Find cycle state for last_trb, starting at old cycle state of
+        * hw_dequeue. If there is only one segment ring, find_trb_seg() will
+        * return immediately and cannot toggle the cycle state if this search
+        * wraps around, so add one more toggle manually in that case.
+        */
+       state->new_cycle_state = hw_dequeue & 0x1;
+       if (ep_ring->first_seg == ep_ring->first_seg->next &&
+                       cur_td->last_trb < state->new_deq_ptr)
+               state->new_cycle_state ^= 0x1;
+
        state->new_deq_ptr = cur_td->last_trb;
        xhci_dbg_trace(xhci, trace_xhci_dbg_cancel_urb,
                        "Finding segment containing last TRB in TD.");
        state->new_deq_seg = find_trb_seg(state->new_deq_seg,
-                       state->new_deq_ptr,
-                       &state->new_cycle_state);
+                       state->new_deq_ptr, &state->new_cycle_state);
        if (!state->new_deq_seg) {
                WARN_ON(1);
                return;
        }
 
+       /* Increment to find next TRB after last_trb. Cycle if appropriate. */
        trb = &state->new_deq_ptr->generic;
        if (TRB_TYPE_LINK_LE32(trb->field[3]) &&
            (trb->field[3] & cpu_to_le32(LINK_TOGGLE)))
                state->new_cycle_state ^= 0x1;
        next_trb(xhci, ep_ring, &state->new_deq_seg, &state->new_deq_ptr);
 
-       /*
-        * If there is only one segment in a ring, find_trb_seg()'s while loop
-        * will not run, and it will return before it has a chance to see if it
-        * needs to toggle the cycle bit.  It can't tell if the stalled transfer
-        * ended just before the link TRB on a one-segment ring, or if the TD
-        * wrapped around the top of the ring, because it doesn't have the TD in
-        * question.  Look for the one-segment case where stalled TRB's address
-        * is greater than the new dequeue pointer address.
-        */
-       if (ep_ring->first_seg == ep_ring->first_seg->next &&
-                       state->new_deq_ptr < dev->eps[ep_index].stopped_trb)
-               state->new_cycle_state ^= 0x1;
+       /* Don't update the ring cycle state for the producer (us). */
        xhci_dbg_trace(xhci, trace_xhci_dbg_cancel_urb,
                        "Cycle state = 0x%x", state->new_cycle_state);
 
-       /* Don't update the ring cycle state for the producer (us). */
        xhci_dbg_trace(xhci, trace_xhci_dbg_cancel_urb,
                        "New dequeue segment = %p (virtual)",
                        state->new_deq_seg);
@@ -799,7 +801,6 @@ static void xhci_handle_cmd_stop_ep(struct xhci_hcd *xhci, int slot_id,
        if (list_empty(&ep->cancelled_td_list)) {
                xhci_stop_watchdog_timer_in_irq(xhci, ep);
                ep->stopped_td = NULL;
-               ep->stopped_trb = NULL;
                ring_doorbell_for_active_rings(xhci, slot_id, ep_index);
                return;
        }
@@ -867,11 +868,9 @@ remove_finished_td:
                ring_doorbell_for_active_rings(xhci, slot_id, ep_index);
        }
 
-       /* Clear stopped_td and stopped_trb if endpoint is not halted */
-       if (!(ep->ep_state & EP_HALTED)) {
+       /* Clear stopped_td if endpoint is not halted */
+       if (!(ep->ep_state & EP_HALTED))
                ep->stopped_td = NULL;
-               ep->stopped_trb = NULL;
-       }
 
        /*
         * Drop the lock and complete the URBs in the cancelled TD list.
@@ -1941,14 +1940,12 @@ static void xhci_cleanup_halted_endpoint(struct xhci_hcd *xhci,
        struct xhci_virt_ep *ep = &xhci->devs[slot_id]->eps[ep_index];
        ep->ep_state |= EP_HALTED;
        ep->stopped_td = td;
-       ep->stopped_trb = event_trb;
        ep->stopped_stream = stream_id;
 
        xhci_queue_reset_ep(xhci, slot_id, ep_index);
        xhci_cleanup_stalled_ring(xhci, td->urb->dev, ep_index);
 
        ep->stopped_td = NULL;
-       ep->stopped_trb = NULL;
        ep->stopped_stream = 0;
 
        xhci_ring_cmd_db(xhci);
@@ -2030,7 +2027,6 @@ static int finish_td(struct xhci_hcd *xhci, struct xhci_td *td,
                 * the ring dequeue pointer or take this TD off any lists yet.
                 */
                ep->stopped_td = td;
-               ep->stopped_trb = event_trb;
                return 0;
        } else {
                if (trb_comp_code == COMP_STALL) {
@@ -2042,7 +2038,6 @@ static int finish_td(struct xhci_hcd *xhci, struct xhci_td *td,
                         * USB class driver clear the stall later.
                         */
                        ep->stopped_td = td;
-                       ep->stopped_trb = event_trb;
                        ep->stopped_stream = ep_ring->stream_id;
                } else if (xhci_requires_manual_halt_cleanup(xhci,
                                        ep_ctx, trb_comp_code)) {
index 8fe4e124ddd49f17fb3ac6d2088cedc15758d7b2..300836972faa41cb11b3c61bf4423f702e2d1d0c 100644 (file)
@@ -408,16 +408,16 @@ static int xhci_try_enable_msi(struct usb_hcd *hcd)
 
 #else
 
-static int xhci_try_enable_msi(struct usb_hcd *hcd)
+static inline int xhci_try_enable_msi(struct usb_hcd *hcd)
 {
        return 0;
 }
 
-static void xhci_cleanup_msix(struct xhci_hcd *xhci)
+static inline void xhci_cleanup_msix(struct xhci_hcd *xhci)
 {
 }
 
-static void xhci_msix_sync_irqs(struct xhci_hcd *xhci)
+static inline void xhci_msix_sync_irqs(struct xhci_hcd *xhci)
 {
 }
 
@@ -2954,7 +2954,6 @@ void xhci_endpoint_reset(struct usb_hcd *hcd,
                xhci_ring_cmd_db(xhci);
        }
        virt_ep->stopped_td = NULL;
-       virt_ep->stopped_trb = NULL;
        virt_ep->stopped_stream = 0;
        spin_unlock_irqrestore(&xhci->lock, flags);
 
index d280e9213d08614002030573afcb2b93a5bbe9c8..4746816aed3e7c42097d10986acfff09e19ac27b 100644 (file)
@@ -865,8 +865,6 @@ struct xhci_virt_ep {
 #define EP_GETTING_NO_STREAMS  (1 << 5)
        /* ----  Related to URB cancellation ---- */
        struct list_head        cancelled_td_list;
-       /* The TRB that was last reported in a stopped endpoint ring */
-       union xhci_trb          *stopped_trb;
        struct xhci_td          *stopped_td;
        unsigned int            stopped_stream;
        /* Watchdog timer for stop endpoint command to cancel URBs */
index 3372ded5def79853e75ac79ecc66b74d73c63542..e2fd263585de3fb2d8cb5cab3e782eff5675a198 100644 (file)
@@ -470,8 +470,9 @@ static int dsps_musb_exit(struct musb *musb)
        struct dsps_glue *glue = dev_get_drvdata(dev->parent);
 
        del_timer_sync(&glue->timer);
-
        usb_phy_shutdown(musb->xceiv);
+       debugfs_remove_recursive(glue->dbgfs_root);
+
        return 0;
 }
 
@@ -708,8 +709,6 @@ static int dsps_remove(struct platform_device *pdev)
        pm_runtime_put(&pdev->dev);
        pm_runtime_disable(&pdev->dev);
 
-       debugfs_remove_recursive(glue->dbgfs_root);
-
        return 0;
 }
 
index d341c149a2f90c1372201b9a78aff8a89d4f3e6f..d369bf1f3936cba910de97a4859c27174dbed8a5 100644 (file)
@@ -316,7 +316,13 @@ static void omap_musb_mailbox_work(struct work_struct *mailbox_work)
 {
        struct omap2430_glue *glue = container_of(mailbox_work,
                                struct omap2430_glue, omap_musb_mailbox_work);
+       struct musb *musb = glue_to_musb(glue);
+       struct device *dev = musb->controller;
+
+       pm_runtime_get_sync(dev);
        omap_musb_set_mailbox(glue);
+       pm_runtime_mark_last_busy(dev);
+       pm_runtime_put_autosuspend(dev);
 }
 
 static irqreturn_t omap2430_musb_interrupt(int irq, void *__hci)
@@ -416,6 +422,7 @@ static int omap2430_musb_init(struct musb *musb)
                omap_musb_set_mailbox(glue);
 
        phy_init(musb->phy);
+       phy_power_on(musb->phy);
 
        pm_runtime_put_noidle(musb->controller);
        return 0;
@@ -478,6 +485,7 @@ static int omap2430_musb_exit(struct musb *musb)
        del_timer_sync(&musb_idle_timer);
 
        omap2430_low_level_exit(musb);
+       phy_power_off(musb->phy);
        phy_exit(musb->phy);
 
        return 0;
index d75196ad5f2f6e7a13a76e16f283464ae875507e..35b6083b799949bc30f4f359ee62a55542959ca9 100644 (file)
@@ -3,6 +3,7 @@
 #include <linux/err.h>
 #include <linux/of.h>
 #include <linux/io.h>
+#include <linux/delay.h>
 #include "am35x-phy-control.h"
 
 struct am335x_control_usb {
@@ -86,6 +87,14 @@ static void am335x_phy_power(struct phy_control *phy_ctrl, u32 id, bool on)
        }
 
        writel(val, usb_ctrl->phy_reg + reg);
+
+       /*
+        * Give the PHY ~1ms to complete the power up operation.
+        * Tests have shown unstable behaviour if other USB PHY related
+        * registers are written too shortly after such a transition.
+        */
+       if (on)
+               mdelay(1);
 }
 
 static const struct phy_control ctrl_am335x = {
index c47e5a6edde28a1d35d64d6f375df401a8519709..d03fadd2629f1419b00a60ef9d842913c5ae6183 100644 (file)
@@ -303,17 +303,18 @@ int otg_statemachine(struct otg_fsm *fsm)
                        otg_set_state(fsm, OTG_STATE_A_WAIT_VRISE);
                break;
        case OTG_STATE_A_WAIT_VRISE:
-               if (fsm->id || fsm->a_bus_drop || fsm->a_vbus_vld ||
-                               fsm->a_wait_vrise_tmout) {
+               if (fsm->a_vbus_vld)
                        otg_set_state(fsm, OTG_STATE_A_WAIT_BCON);
-               }
+               else if (fsm->id || fsm->a_bus_drop ||
+                               fsm->a_wait_vrise_tmout)
+                       otg_set_state(fsm, OTG_STATE_A_WAIT_VFALL);
                break;
        case OTG_STATE_A_WAIT_BCON:
                if (!fsm->a_vbus_vld)
                        otg_set_state(fsm, OTG_STATE_A_VBUS_ERR);
                else if (fsm->b_conn)
                        otg_set_state(fsm, OTG_STATE_A_HOST);
-               else if (fsm->id | fsm->a_bus_drop | fsm->a_wait_bcon_tmout)
+               else if (fsm->id || fsm->a_bus_drop || fsm->a_wait_bcon_tmout)
                        otg_set_state(fsm, OTG_STATE_A_WAIT_VFALL);
                break;
        case OTG_STATE_A_HOST:
index 8afa813d690bc6f7aa15c9b9c7523cf96b24099a..36b6bce33b20c17df4583e5ce5e9b07298335842 100644 (file)
@@ -132,6 +132,9 @@ struct usb_phy *usb_get_phy(enum usb_phy_type type)
        if (IS_ERR(phy) || !try_module_get(phy->dev->driver->owner)) {
                pr_debug("PHY: unable to find transceiver of type %s\n",
                        usb_phy_type_string(type));
+               if (!IS_ERR(phy))
+                       phy = ERR_PTR(-ENODEV);
+
                goto err0;
        }
 
index a2db5be9c30534ceae25eb262bde00242a152e2d..df90dae53eb97cde7d9e1d6ebf47233c7b3bb149 100644 (file)
@@ -28,6 +28,7 @@
 #include <linux/spinlock.h>
 #include <linux/mutex.h>
 #include <linux/serial.h>
+#include <linux/swab.h>
 #include <linux/kfifo.h>
 #include <linux/ioctl.h>
 #include <linux/firmware.h>
@@ -280,7 +281,7 @@ static int read_download_mem(struct usb_device *dev, int start_address,
 {
        int status = 0;
        __u8 read_length;
-       __be16 be_start_address;
+       u16 be_start_address;
 
        dev_dbg(&dev->dev, "%s - @ %x for %d\n", __func__, start_address, length);
 
@@ -296,10 +297,14 @@ static int read_download_mem(struct usb_device *dev, int start_address,
                if (read_length > 1) {
                        dev_dbg(&dev->dev, "%s - @ %x for %d\n", __func__, start_address, read_length);
                }
-               be_start_address = cpu_to_be16(start_address);
+               /*
+                * NOTE: Must use swab as wIndex is sent in little-endian
+                *       byte order regardless of host byte order.
+                */
+               be_start_address = swab16((u16)start_address);
                status = ti_vread_sync(dev, UMPC_MEMORY_READ,
                                        (__u16)address_type,
-                                       (__force __u16)be_start_address,
+                                       be_start_address,
                                        buffer, read_length);
 
                if (status) {
@@ -394,7 +399,7 @@ static int write_i2c_mem(struct edgeport_serial *serial,
        struct device *dev = &serial->serial->dev->dev;
        int status = 0;
        int write_length;
-       __be16 be_start_address;
+       u16 be_start_address;
 
        /* We can only send a maximum of 1 aligned byte page at a time */
 
@@ -409,11 +414,16 @@ static int write_i2c_mem(struct edgeport_serial *serial,
                __func__, start_address, write_length);
        usb_serial_debug_data(dev, __func__, write_length, buffer);
 
-       /* Write first page */
-       be_start_address = cpu_to_be16(start_address);
+       /*
+        * Write first page.
+        *
+        * NOTE: Must use swab as wIndex is sent in little-endian byte order
+        *       regardless of host byte order.
+        */
+       be_start_address = swab16((u16)start_address);
        status = ti_vsend_sync(serial->serial->dev,
                                UMPC_MEMORY_WRITE, (__u16)address_type,
-                               (__force __u16)be_start_address,
+                               be_start_address,
                                buffer, write_length);
        if (status) {
                dev_dbg(dev, "%s - ERROR %d\n", __func__, status);
@@ -436,11 +446,16 @@ static int write_i2c_mem(struct edgeport_serial *serial,
                        __func__, start_address, write_length);
                usb_serial_debug_data(dev, __func__, write_length, buffer);
 
-               /* Write next page */
-               be_start_address = cpu_to_be16(start_address);
+               /*
+                * Write next page.
+                *
+                * NOTE: Must use swab as wIndex is sent in little-endian byte
+                *       order regardless of host byte order.
+                */
+               be_start_address = swab16((u16)start_address);
                status = ti_vsend_sync(serial->serial->dev, UMPC_MEMORY_WRITE,
                                (__u16)address_type,
-                               (__force __u16)be_start_address,
+                               be_start_address,
                                buffer, write_length);
                if (status) {
                        dev_err(dev, "%s - ERROR %d\n", __func__, status);
@@ -585,8 +600,8 @@ static int get_descriptor_addr(struct edgeport_serial *serial,
                if (rom_desc->Type == desc_type)
                        return start_address;
 
-               start_address = start_address + sizeof(struct ti_i2c_desc)
-                                                       + rom_desc->Size;
+               start_address = start_address + sizeof(struct ti_i2c_desc) +
+                                               le16_to_cpu(rom_desc->Size);
 
        } while ((start_address < TI_MAX_I2C_SIZE) && rom_desc->Type);
 
@@ -599,7 +614,7 @@ static int valid_csum(struct ti_i2c_desc *rom_desc, __u8 *buffer)
        __u16 i;
        __u8 cs = 0;
 
-       for (i = 0; i < rom_desc->Size; i++)
+       for (i = 0; i < le16_to_cpu(rom_desc->Size); i++)
                cs = (__u8)(cs + buffer[i]);
 
        if (cs != rom_desc->CheckSum) {
@@ -650,7 +665,7 @@ static int check_i2c_image(struct edgeport_serial *serial)
                        break;
 
                if ((start_address + sizeof(struct ti_i2c_desc) +
-                                       rom_desc->Size) > TI_MAX_I2C_SIZE) {
+                       le16_to_cpu(rom_desc->Size)) > TI_MAX_I2C_SIZE) {
                        status = -ENODEV;
                        dev_dbg(dev, "%s - structure too big, erroring out.\n", __func__);
                        break;
@@ -665,7 +680,8 @@ static int check_i2c_image(struct edgeport_serial *serial)
                        /* Read the descriptor data */
                        status = read_rom(serial, start_address +
                                                sizeof(struct ti_i2c_desc),
-                                               rom_desc->Size, buffer);
+                                               le16_to_cpu(rom_desc->Size),
+                                               buffer);
                        if (status)
                                break;
 
@@ -674,7 +690,7 @@ static int check_i2c_image(struct edgeport_serial *serial)
                                break;
                }
                start_address = start_address + sizeof(struct ti_i2c_desc) +
-                                                               rom_desc->Size;
+                                               le16_to_cpu(rom_desc->Size);
 
        } while ((rom_desc->Type != I2C_DESC_TYPE_ION) &&
                                (start_address < TI_MAX_I2C_SIZE));
@@ -712,7 +728,7 @@ static int get_manuf_info(struct edgeport_serial *serial, __u8 *buffer)
 
        /* Read the descriptor data */
        status = read_rom(serial, start_address+sizeof(struct ti_i2c_desc),
-                                               rom_desc->Size, buffer);
+                                       le16_to_cpu(rom_desc->Size), buffer);
        if (status)
                goto exit;
 
index 367c7f08b27c53c154c4c387a434f159939b4730..f213ee97851650af87a0d22e5173b34a55e1b2de 100644 (file)
@@ -234,8 +234,31 @@ static void option_instat_callback(struct urb *urb);
 #define QUALCOMM_VENDOR_ID                     0x05C6
 
 #define CMOTECH_VENDOR_ID                      0x16d8
-#define CMOTECH_PRODUCT_6008                   0x6008
-#define CMOTECH_PRODUCT_6280                   0x6280
+#define CMOTECH_PRODUCT_6001                   0x6001
+#define CMOTECH_PRODUCT_CMU_300                        0x6002
+#define CMOTECH_PRODUCT_6003                   0x6003
+#define CMOTECH_PRODUCT_6004                   0x6004
+#define CMOTECH_PRODUCT_6005                   0x6005
+#define CMOTECH_PRODUCT_CGU_628A               0x6006
+#define CMOTECH_PRODUCT_CHE_628S               0x6007
+#define CMOTECH_PRODUCT_CMU_301                        0x6008
+#define CMOTECH_PRODUCT_CHU_628                        0x6280
+#define CMOTECH_PRODUCT_CHU_628S               0x6281
+#define CMOTECH_PRODUCT_CDU_680                        0x6803
+#define CMOTECH_PRODUCT_CDU_685A               0x6804
+#define CMOTECH_PRODUCT_CHU_720S               0x7001
+#define CMOTECH_PRODUCT_7002                   0x7002
+#define CMOTECH_PRODUCT_CHU_629K               0x7003
+#define CMOTECH_PRODUCT_7004                   0x7004
+#define CMOTECH_PRODUCT_7005                   0x7005
+#define CMOTECH_PRODUCT_CGU_629                        0x7006
+#define CMOTECH_PRODUCT_CHU_629S               0x700a
+#define CMOTECH_PRODUCT_CHU_720I               0x7211
+#define CMOTECH_PRODUCT_7212                   0x7212
+#define CMOTECH_PRODUCT_7213                   0x7213
+#define CMOTECH_PRODUCT_7251                   0x7251
+#define CMOTECH_PRODUCT_7252                   0x7252
+#define CMOTECH_PRODUCT_7253                   0x7253
 
 #define TELIT_VENDOR_ID                                0x1bc7
 #define TELIT_PRODUCT_UC864E                   0x1003
@@ -287,6 +310,7 @@ static void option_instat_callback(struct urb *urb);
 #define ALCATEL_PRODUCT_X060S_X200             0x0000
 #define ALCATEL_PRODUCT_X220_X500D             0x0017
 #define ALCATEL_PRODUCT_L100V                  0x011e
+#define ALCATEL_PRODUCT_L800MA                 0x0203
 
 #define PIRELLI_VENDOR_ID                      0x1266
 #define PIRELLI_PRODUCT_C100_1                 0x1002
@@ -349,6 +373,7 @@ static void option_instat_callback(struct urb *urb);
 #define OLIVETTI_PRODUCT_OLICARD100            0xc000
 #define OLIVETTI_PRODUCT_OLICARD145            0xc003
 #define OLIVETTI_PRODUCT_OLICARD200            0xc005
+#define OLIVETTI_PRODUCT_OLICARD500            0xc00b
 
 /* Celot products */
 #define CELOT_VENDOR_ID                                0x211f
@@ -502,6 +527,10 @@ static const struct option_blacklist_info huawei_cdc12_blacklist = {
        .reserved = BIT(1) | BIT(2),
 };
 
+static const struct option_blacklist_info net_intf0_blacklist = {
+       .reserved = BIT(0),
+};
+
 static const struct option_blacklist_info net_intf1_blacklist = {
        .reserved = BIT(1),
 };
@@ -1035,8 +1064,47 @@ static const struct usb_device_id option_ids[] = {
        { USB_DEVICE(QUALCOMM_VENDOR_ID, 0x6613)}, /* Onda H600/ZTE MF330 */
        { USB_DEVICE(QUALCOMM_VENDOR_ID, 0x0023)}, /* ONYX 3G device */
        { USB_DEVICE(QUALCOMM_VENDOR_ID, 0x9000)}, /* SIMCom SIM5218 */
-       { USB_DEVICE(CMOTECH_VENDOR_ID, CMOTECH_PRODUCT_6280) }, /* BP3-USB & BP3-EXT HSDPA */
-       { USB_DEVICE(CMOTECH_VENDOR_ID, CMOTECH_PRODUCT_6008) },
+       { USB_DEVICE(CMOTECH_VENDOR_ID, CMOTECH_PRODUCT_6001) },
+       { USB_DEVICE(CMOTECH_VENDOR_ID, CMOTECH_PRODUCT_CMU_300) },
+       { USB_DEVICE(CMOTECH_VENDOR_ID, CMOTECH_PRODUCT_6003),
+         .driver_info = (kernel_ulong_t)&net_intf0_blacklist },
+       { USB_DEVICE(CMOTECH_VENDOR_ID, CMOTECH_PRODUCT_6004) },
+       { USB_DEVICE(CMOTECH_VENDOR_ID, CMOTECH_PRODUCT_6005) },
+       { USB_DEVICE(CMOTECH_VENDOR_ID, CMOTECH_PRODUCT_CGU_628A) },
+       { USB_DEVICE(CMOTECH_VENDOR_ID, CMOTECH_PRODUCT_CHE_628S),
+         .driver_info = (kernel_ulong_t)&net_intf0_blacklist },
+       { USB_DEVICE(CMOTECH_VENDOR_ID, CMOTECH_PRODUCT_CMU_301),
+         .driver_info = (kernel_ulong_t)&net_intf0_blacklist },
+       { USB_DEVICE(CMOTECH_VENDOR_ID, CMOTECH_PRODUCT_CHU_628),
+         .driver_info = (kernel_ulong_t)&net_intf0_blacklist },
+       { USB_DEVICE(CMOTECH_VENDOR_ID, CMOTECH_PRODUCT_CHU_628S) },
+       { USB_DEVICE(CMOTECH_VENDOR_ID, CMOTECH_PRODUCT_CDU_680) },
+       { USB_DEVICE(CMOTECH_VENDOR_ID, CMOTECH_PRODUCT_CDU_685A) },
+       { USB_DEVICE(CMOTECH_VENDOR_ID, CMOTECH_PRODUCT_CHU_720S),
+         .driver_info = (kernel_ulong_t)&net_intf0_blacklist },
+       { USB_DEVICE(CMOTECH_VENDOR_ID, CMOTECH_PRODUCT_7002),
+         .driver_info = (kernel_ulong_t)&net_intf0_blacklist },
+       { USB_DEVICE(CMOTECH_VENDOR_ID, CMOTECH_PRODUCT_CHU_629K),
+         .driver_info = (kernel_ulong_t)&net_intf4_blacklist },
+       { USB_DEVICE(CMOTECH_VENDOR_ID, CMOTECH_PRODUCT_7004),
+         .driver_info = (kernel_ulong_t)&net_intf3_blacklist },
+       { USB_DEVICE(CMOTECH_VENDOR_ID, CMOTECH_PRODUCT_7005) },
+       { USB_DEVICE(CMOTECH_VENDOR_ID, CMOTECH_PRODUCT_CGU_629),
+         .driver_info = (kernel_ulong_t)&net_intf5_blacklist },
+       { USB_DEVICE(CMOTECH_VENDOR_ID, CMOTECH_PRODUCT_CHU_629S),
+         .driver_info = (kernel_ulong_t)&net_intf4_blacklist },
+       { USB_DEVICE(CMOTECH_VENDOR_ID, CMOTECH_PRODUCT_CHU_720I),
+         .driver_info = (kernel_ulong_t)&net_intf0_blacklist },
+       { USB_DEVICE(CMOTECH_VENDOR_ID, CMOTECH_PRODUCT_7212),
+         .driver_info = (kernel_ulong_t)&net_intf0_blacklist },
+       { USB_DEVICE(CMOTECH_VENDOR_ID, CMOTECH_PRODUCT_7213),
+         .driver_info = (kernel_ulong_t)&net_intf0_blacklist },
+       { USB_DEVICE(CMOTECH_VENDOR_ID, CMOTECH_PRODUCT_7251),
+         .driver_info = (kernel_ulong_t)&net_intf1_blacklist },
+       { USB_DEVICE(CMOTECH_VENDOR_ID, CMOTECH_PRODUCT_7252),
+         .driver_info = (kernel_ulong_t)&net_intf1_blacklist },
+       { USB_DEVICE(CMOTECH_VENDOR_ID, CMOTECH_PRODUCT_7253),
+         .driver_info = (kernel_ulong_t)&net_intf1_blacklist },
        { USB_DEVICE(TELIT_VENDOR_ID, TELIT_PRODUCT_UC864E) },
        { USB_DEVICE(TELIT_VENDOR_ID, TELIT_PRODUCT_UC864G) },
        { USB_DEVICE(TELIT_VENDOR_ID, TELIT_PRODUCT_CC864_DUAL) },
@@ -1500,6 +1568,8 @@ static const struct usb_device_id option_ids[] = {
          .driver_info = (kernel_ulong_t)&net_intf5_blacklist },
        { USB_DEVICE(ALCATEL_VENDOR_ID, ALCATEL_PRODUCT_L100V),
          .driver_info = (kernel_ulong_t)&net_intf4_blacklist },
+       { USB_DEVICE(ALCATEL_VENDOR_ID, ALCATEL_PRODUCT_L800MA),
+         .driver_info = (kernel_ulong_t)&net_intf2_blacklist },
        { USB_DEVICE(AIRPLUS_VENDOR_ID, AIRPLUS_PRODUCT_MCD650) },
        { USB_DEVICE(TLAYTECH_VENDOR_ID, TLAYTECH_PRODUCT_TEU800) },
        { USB_DEVICE(LONGCHEER_VENDOR_ID, FOUR_G_SYSTEMS_PRODUCT_W14),
@@ -1545,6 +1615,9 @@ static const struct usb_device_id option_ids[] = {
        { USB_DEVICE(OLIVETTI_VENDOR_ID, OLIVETTI_PRODUCT_OLICARD200),
                .driver_info = (kernel_ulong_t)&net_intf6_blacklist
        },
+       { USB_DEVICE(OLIVETTI_VENDOR_ID, OLIVETTI_PRODUCT_OLICARD500),
+               .driver_info = (kernel_ulong_t)&net_intf4_blacklist
+       },
        { USB_DEVICE(CELOT_VENDOR_ID, CELOT_PRODUCT_CT680M) }, /* CT-650 CDMA 450 1xEVDO modem */
        { USB_DEVICE_AND_INTERFACE_INFO(SAMSUNG_VENDOR_ID, SAMSUNG_PRODUCT_GT_B3730, USB_CLASS_CDC_DATA, 0x00, 0x00) }, /* Samsung GT-B3730 LTE USB modem.*/
        { USB_DEVICE(YUGA_VENDOR_ID, YUGA_PRODUCT_CEM600) },
index 968a40201e5f6e2f2fed8de8e1668977e3f47db4..6c0a542e8ec1820d60d03f5a7896843d4a6b96f6 100644 (file)
@@ -136,12 +136,36 @@ static const struct usb_device_id id_table[] = {
        {USB_DEVICE_INTERFACE_NUMBER(0x1199, 0x68a2, 0)},       /* Sierra Wireless MC7710 Device Management */
        {USB_DEVICE_INTERFACE_NUMBER(0x1199, 0x68a2, 2)},       /* Sierra Wireless MC7710 NMEA */
        {USB_DEVICE_INTERFACE_NUMBER(0x1199, 0x68a2, 3)},       /* Sierra Wireless MC7710 Modem */
+       {USB_DEVICE_INTERFACE_NUMBER(0x1199, 0x68c0, 0)},       /* Sierra Wireless MC73xx Device Management */
+       {USB_DEVICE_INTERFACE_NUMBER(0x1199, 0x68c0, 2)},       /* Sierra Wireless MC73xx NMEA */
+       {USB_DEVICE_INTERFACE_NUMBER(0x1199, 0x68c0, 3)},       /* Sierra Wireless MC73xx Modem */
        {USB_DEVICE_INTERFACE_NUMBER(0x1199, 0x901c, 0)},       /* Sierra Wireless EM7700 Device Management */
        {USB_DEVICE_INTERFACE_NUMBER(0x1199, 0x901c, 2)},       /* Sierra Wireless EM7700 NMEA */
        {USB_DEVICE_INTERFACE_NUMBER(0x1199, 0x901c, 3)},       /* Sierra Wireless EM7700 Modem */
+       {USB_DEVICE_INTERFACE_NUMBER(0x1199, 0x901f, 0)},       /* Sierra Wireless EM7355 Device Management */
+       {USB_DEVICE_INTERFACE_NUMBER(0x1199, 0x901f, 2)},       /* Sierra Wireless EM7355 NMEA */
+       {USB_DEVICE_INTERFACE_NUMBER(0x1199, 0x901f, 3)},       /* Sierra Wireless EM7355 Modem */
+       {USB_DEVICE_INTERFACE_NUMBER(0x1199, 0x9041, 0)},       /* Sierra Wireless MC7305/MC7355 Device Management */
+       {USB_DEVICE_INTERFACE_NUMBER(0x1199, 0x9041, 2)},       /* Sierra Wireless MC7305/MC7355 NMEA */
+       {USB_DEVICE_INTERFACE_NUMBER(0x1199, 0x9041, 3)},       /* Sierra Wireless MC7305/MC7355 Modem */
        {USB_DEVICE_INTERFACE_NUMBER(0x1199, 0x9051, 0)},       /* Netgear AirCard 340U Device Management */
        {USB_DEVICE_INTERFACE_NUMBER(0x1199, 0x9051, 2)},       /* Netgear AirCard 340U NMEA */
        {USB_DEVICE_INTERFACE_NUMBER(0x1199, 0x9051, 3)},       /* Netgear AirCard 340U Modem */
+       {USB_DEVICE_INTERFACE_NUMBER(0x413c, 0x81a2, 0)},       /* Dell Wireless 5806 Gobi(TM) 4G LTE Mobile Broadband Card Device Management */
+       {USB_DEVICE_INTERFACE_NUMBER(0x413c, 0x81a2, 2)},       /* Dell Wireless 5806 Gobi(TM) 4G LTE Mobile Broadband Card NMEA */
+       {USB_DEVICE_INTERFACE_NUMBER(0x413c, 0x81a2, 3)},       /* Dell Wireless 5806 Gobi(TM) 4G LTE Mobile Broadband Card Modem */
+       {USB_DEVICE_INTERFACE_NUMBER(0x413c, 0x81a3, 0)},       /* Dell Wireless 5570 HSPA+ (42Mbps) Mobile Broadband Card Device Management */
+       {USB_DEVICE_INTERFACE_NUMBER(0x413c, 0x81a3, 2)},       /* Dell Wireless 5570 HSPA+ (42Mbps) Mobile Broadband Card NMEA */
+       {USB_DEVICE_INTERFACE_NUMBER(0x413c, 0x81a3, 3)},       /* Dell Wireless 5570 HSPA+ (42Mbps) Mobile Broadband Card Modem */
+       {USB_DEVICE_INTERFACE_NUMBER(0x413c, 0x81a4, 0)},       /* Dell Wireless 5570e HSPA+ (42Mbps) Mobile Broadband Card Device Management */
+       {USB_DEVICE_INTERFACE_NUMBER(0x413c, 0x81a4, 2)},       /* Dell Wireless 5570e HSPA+ (42Mbps) Mobile Broadband Card NMEA */
+       {USB_DEVICE_INTERFACE_NUMBER(0x413c, 0x81a4, 3)},       /* Dell Wireless 5570e HSPA+ (42Mbps) Mobile Broadband Card Modem */
+       {USB_DEVICE_INTERFACE_NUMBER(0x413c, 0x81a8, 0)},       /* Dell Wireless 5808 Gobi(TM) 4G LTE Mobile Broadband Card Device Management */
+       {USB_DEVICE_INTERFACE_NUMBER(0x413c, 0x81a8, 2)},       /* Dell Wireless 5808 Gobi(TM) 4G LTE Mobile Broadband Card NMEA */
+       {USB_DEVICE_INTERFACE_NUMBER(0x413c, 0x81a8, 3)},       /* Dell Wireless 5808 Gobi(TM) 4G LTE Mobile Broadband Card Modem */
+       {USB_DEVICE_INTERFACE_NUMBER(0x413c, 0x81a9, 0)},       /* Dell Wireless 5808e Gobi(TM) 4G LTE Mobile Broadband Card Device Management */
+       {USB_DEVICE_INTERFACE_NUMBER(0x413c, 0x81a9, 2)},       /* Dell Wireless 5808e Gobi(TM) 4G LTE Mobile Broadband Card NMEA */
+       {USB_DEVICE_INTERFACE_NUMBER(0x413c, 0x81a9, 3)},       /* Dell Wireless 5808e Gobi(TM) 4G LTE Mobile Broadband Card Modem */
 
        { }                             /* Terminating entry */
 };
index 81fc0dfcfdcf6a073dc408ae7203747b1cd6167b..6d40d56378d77d0a1e4c65f66a2b46cbd81fe396 100644 (file)
@@ -1347,10 +1347,12 @@ static int usb_serial_register(struct usb_serial_driver *driver)
 static void usb_serial_deregister(struct usb_serial_driver *device)
 {
        pr_info("USB Serial deregistering driver %s\n", device->description);
+
        mutex_lock(&table_lock);
        list_del(&device->driver_list);
-       usb_serial_bus_deregister(device);
        mutex_unlock(&table_lock);
+
+       usb_serial_bus_deregister(device);
 }
 
 /**
index 4ef2a80728f74521d103dc8d33b1fed8735b1947..008d805c3d21cde7458058a6a2a5b0803da92bcf 100644 (file)
@@ -1851,7 +1851,7 @@ static int usbat_probe(struct usb_interface *intf,
        us->transport_name = "Shuttle USBAT";
        us->transport = usbat_flash_transport;
        us->transport_reset = usb_stor_CB_reset;
-       us->max_lun = 1;
+       us->max_lun = 0;
 
        result = usb_stor_probe2(us);
        return result;
index f4a82291894ab2964754ac6eb06b1c7dfb4b3978..174a447868cd6924fd81f39ea0da8666b88e2110 100644 (file)
@@ -234,6 +234,20 @@ UNUSUAL_DEV(  0x0421, 0x0495, 0x0370, 0x0370,
                USB_SC_DEVICE, USB_PR_DEVICE, NULL,
                US_FL_MAX_SECTORS_64 ),
 
+/* Reported by Daniele Forsi <dforsi@gmail.com> */
+UNUSUAL_DEV(  0x0421, 0x04b9, 0x0350, 0x0350,
+               "Nokia",
+               "5300",
+               USB_SC_DEVICE, USB_PR_DEVICE, NULL,
+               US_FL_MAX_SECTORS_64 ),
+
+/* Patch submitted by Victor A. Santos <victoraur.santos@gmail.com> */
+UNUSUAL_DEV(  0x0421, 0x05af, 0x0742, 0x0742,
+               "Nokia",
+               "305",
+               USB_SC_DEVICE, USB_PR_DEVICE, NULL,
+               US_FL_MAX_SECTORS_64),
+
 /* Patch submitted by Mikhail Zolotaryov <lebon@lebon.org.ua> */
 UNUSUAL_DEV(  0x0421, 0x06aa, 0x1110, 0x1110,
                "Nokia",
index 44741267c917672149474fea1aef4efcd86d2210..3f485df9622643f1da5345d83a80c62078a80563 100644 (file)
@@ -301,7 +301,7 @@ int wusbhc_chid_set(struct wusbhc *wusbhc, const struct wusb_ckhdid *chid)
 
        if (chid)
                result = uwb_radio_start(&wusbhc->pal);
-       else
+       else if (wusbhc->uwb_rc)
                uwb_radio_stop(&wusbhc->pal);
 
        return result;
index c8e2a47d62a77eee093820f4eee1a3af613ed61a..3e2e4ed2015739bf9acd3cea2a7fd22fa8c3a1d8 100644 (file)
@@ -2390,10 +2390,10 @@ error_complete:
                done) {
 
                dev_info(dev, "Control EP stall.  Queue delayed work.\n");
-               spin_lock_irq(&wa->xfer_list_lock);
+               spin_lock(&wa->xfer_list_lock);
                /* move xfer from xfer_list to xfer_errored_list. */
                list_move_tail(&xfer->list_node, &wa->xfer_errored_list);
-               spin_unlock_irq(&wa->xfer_list_lock);
+               spin_unlock(&wa->xfer_list_lock);
                spin_unlock_irqrestore(&xfer->lock, flags);
                queue_work(wusbd, &wa->xfer_error_work);
        } else {
index 1a2fd9795367cc719aec064b8443bacd5348f5c0..468c89fb6a1689a60ff6d0ccc41d0d1fcb2bd1ef 100644 (file)
@@ -59,6 +59,7 @@ static void uwb_rc_set_drp_cmd_done(struct uwb_rc *rc, void *arg,
                                    struct uwb_rceb *reply, ssize_t reply_size)
 {
        struct uwb_rc_evt_set_drp_ie *r = (struct uwb_rc_evt_set_drp_ie *)reply;
+       unsigned long flags;
 
        if (r != NULL) {
                if (r->bResultCode != UWB_RC_RES_SUCCESS)
@@ -67,14 +68,14 @@ static void uwb_rc_set_drp_cmd_done(struct uwb_rc *rc, void *arg,
        } else
                dev_err(&rc->uwb_dev.dev, "SET-DRP-IE: timeout\n");
 
-       spin_lock_irq(&rc->rsvs_lock);
+       spin_lock_irqsave(&rc->rsvs_lock, flags);
        if (rc->set_drp_ie_pending > 1) {
                rc->set_drp_ie_pending = 0;
-               uwb_rsv_queue_update(rc);       
+               uwb_rsv_queue_update(rc);
        } else {
-               rc->set_drp_ie_pending = 0;     
+               rc->set_drp_ie_pending = 0;
        }
-       spin_unlock_irq(&rc->rsvs_lock);
+       spin_unlock_irqrestore(&rc->rsvs_lock, flags);
 }
 
 /**
index 12a3de0ee6dacbdea873ec9ea28bdd88d1ea999d..a0ed6c7d2cd2a3e91a5d12e48af705d75afe315a 100644 (file)
--- a/fs/aio.c
+++ b/fs/aio.c
@@ -112,6 +112,11 @@ struct kioctx {
 
        struct work_struct      free_work;
 
+       /*
+        * signals when all in-flight requests are done
+        */
+       struct completion *requests_done;
+
        struct {
                /*
                 * This counts the number of available slots in the ringbuffer,
@@ -508,6 +513,10 @@ static void free_ioctx_reqs(struct percpu_ref *ref)
 {
        struct kioctx *ctx = container_of(ref, struct kioctx, reqs);
 
+       /* At this point we know that there are no any in-flight requests */
+       if (ctx->requests_done)
+               complete(ctx->requests_done);
+
        INIT_WORK(&ctx->free_work, free_ioctx);
        schedule_work(&ctx->free_work);
 }
@@ -718,7 +727,8 @@ err:
  *     when the processes owning a context have all exited to encourage
  *     the rapid destruction of the kioctx.
  */
-static void kill_ioctx(struct mm_struct *mm, struct kioctx *ctx)
+static void kill_ioctx(struct mm_struct *mm, struct kioctx *ctx,
+               struct completion *requests_done)
 {
        if (!atomic_xchg(&ctx->dead, 1)) {
                struct kioctx_table *table;
@@ -747,7 +757,11 @@ static void kill_ioctx(struct mm_struct *mm, struct kioctx *ctx)
                if (ctx->mmap_size)
                        vm_munmap(ctx->mmap_base, ctx->mmap_size);
 
+               ctx->requests_done = requests_done;
                percpu_ref_kill(&ctx->users);
+       } else {
+               if (requests_done)
+                       complete(requests_done);
        }
 }
 
@@ -809,7 +823,7 @@ void exit_aio(struct mm_struct *mm)
                 */
                ctx->mmap_size = 0;
 
-               kill_ioctx(mm, ctx);
+               kill_ioctx(mm, ctx, NULL);
        }
 }
 
@@ -1185,7 +1199,7 @@ SYSCALL_DEFINE2(io_setup, unsigned, nr_events, aio_context_t __user *, ctxp)
        if (!IS_ERR(ioctx)) {
                ret = put_user(ioctx->user_id, ctxp);
                if (ret)
-                       kill_ioctx(current->mm, ioctx);
+                       kill_ioctx(current->mm, ioctx, NULL);
                percpu_ref_put(&ioctx->users);
        }
 
@@ -1203,8 +1217,22 @@ SYSCALL_DEFINE1(io_destroy, aio_context_t, ctx)
 {
        struct kioctx *ioctx = lookup_ioctx(ctx);
        if (likely(NULL != ioctx)) {
-               kill_ioctx(current->mm, ioctx);
+               struct completion requests_done =
+                       COMPLETION_INITIALIZER_ONSTACK(requests_done);
+
+               /* Pass requests_done to kill_ioctx() where it can be set
+                * in a thread-safe way. If we try to set it here then we have
+                * a race condition if two io_destroy() called simultaneously.
+                */
+               kill_ioctx(current->mm, ioctx, &requests_done);
                percpu_ref_put(&ioctx->users);
+
+               /* Wait until all IO for the context are done. Otherwise kernel
+                * keep using user-space buffers even if user thinks the context
+                * is destroyed.
+                */
+               wait_for_completion(&requests_done);
+
                return 0;
        }
        pr_debug("EINVAL: io_destroy: invalid context id\n");
@@ -1299,10 +1327,8 @@ rw_common:
                                                &iovec, compat)
                        : aio_setup_single_vector(req, rw, buf, &nr_segs,
                                                  iovec);
-               if (ret)
-                       return ret;
-
-               ret = rw_verify_area(rw, file, &req->ki_pos, req->ki_nbytes);
+               if (!ret)
+                       ret = rw_verify_area(rw, file, &req->ki_pos, req->ki_nbytes);
                if (ret < 0) {
                        if (iovec != &inline_vec)
                                kfree(iovec);
index 4c48df572bd65d74636df643c77acda204af5418..ba6b88528dc7eb28b9fe282cc2370f43fa7ea7d4 100644 (file)
@@ -2058,6 +2058,20 @@ struct btrfs_ioctl_defrag_range_args {
 #define btrfs_raw_test_opt(o, opt)     ((o) & BTRFS_MOUNT_##opt)
 #define btrfs_test_opt(root, opt)      ((root)->fs_info->mount_opt & \
                                         BTRFS_MOUNT_##opt)
+#define btrfs_set_and_info(root, opt, fmt, args...)                    \
+{                                                                      \
+       if (!btrfs_test_opt(root, opt))                                 \
+               btrfs_info(root->fs_info, fmt, ##args);                 \
+       btrfs_set_opt(root->fs_info->mount_opt, opt);                   \
+}
+
+#define btrfs_clear_and_info(root, opt, fmt, args...)                  \
+{                                                                      \
+       if (btrfs_test_opt(root, opt))                                  \
+               btrfs_info(root->fs_info, fmt, ##args);                 \
+       btrfs_clear_opt(root->fs_info->mount_opt, opt);                 \
+}
+
 /*
  * Inode flags
  */
index 029d46c2e17048a20a02e7cb34955d22ff80e752..983314932af3cc51260753b8acf807a98ce0988a 100644 (file)
@@ -2861,7 +2861,7 @@ retry_root_backup:
                        printk(KERN_ERR "BTRFS: failed to read log tree\n");
                        free_extent_buffer(log_tree_root->node);
                        kfree(log_tree_root);
-                       goto fail_trans_kthread;
+                       goto fail_qgroup;
                }
                /* returns with log_tree_root freed on success */
                ret = btrfs_recover_log_trees(log_tree_root);
@@ -2870,24 +2870,24 @@ retry_root_backup:
                                    "Failed to recover log tree");
                        free_extent_buffer(log_tree_root->node);
                        kfree(log_tree_root);
-                       goto fail_trans_kthread;
+                       goto fail_qgroup;
                }
 
                if (sb->s_flags & MS_RDONLY) {
                        ret = btrfs_commit_super(tree_root);
                        if (ret)
-                               goto fail_trans_kthread;
+                               goto fail_qgroup;
                }
        }
 
        ret = btrfs_find_orphan_roots(tree_root);
        if (ret)
-               goto fail_trans_kthread;
+               goto fail_qgroup;
 
        if (!(sb->s_flags & MS_RDONLY)) {
                ret = btrfs_cleanup_fs_roots(fs_info);
                if (ret)
-                       goto fail_trans_kthread;
+                       goto fail_qgroup;
 
                ret = btrfs_recover_relocation(tree_root);
                if (ret < 0) {
index 1306487c82cf6a05c8c528f8851fbe70d84c1f80..5590af92094bb67ea61c8ae397cc393b58b75ae6 100644 (file)
@@ -1542,6 +1542,7 @@ again:
                                ret = 0;
                }
                if (ret) {
+                       key.objectid = bytenr;
                        key.type = BTRFS_EXTENT_ITEM_KEY;
                        key.offset = num_bytes;
                        btrfs_release_path(path);
@@ -3542,11 +3543,13 @@ static u64 btrfs_reduce_alloc_profile(struct btrfs_root *root, u64 flags)
        return extended_to_chunk(flags | tmp);
 }
 
-static u64 get_alloc_profile(struct btrfs_root *root, u64 flags)
+static u64 get_alloc_profile(struct btrfs_root *root, u64 orig_flags)
 {
        unsigned seq;
+       u64 flags;
 
        do {
+               flags = orig_flags;
                seq = read_seqbegin(&root->fs_info->profiles_lock);
 
                if (flags & BTRFS_BLOCK_GROUP_DATA)
@@ -5719,6 +5722,7 @@ static int __btrfs_free_extent(struct btrfs_trans_handle *trans,
 
                        if (ret > 0 && skinny_metadata) {
                                skinny_metadata = false;
+                               key.objectid = bytenr;
                                key.type = BTRFS_EXTENT_ITEM_KEY;
                                key.offset = num_bytes;
                                btrfs_release_path(path);
index eb742c07e7a41aacdb595b0252a12b3584bbee83..ae6af072b635e195e26f3199c3aabd427964881f 100644 (file)
@@ -800,7 +800,7 @@ next_slot:
                if (start > key.offset && end < extent_end) {
                        BUG_ON(del_nr > 0);
                        if (extent_type == BTRFS_FILE_EXTENT_INLINE) {
-                               ret = -EINVAL;
+                               ret = -EOPNOTSUPP;
                                break;
                        }
 
@@ -846,7 +846,7 @@ next_slot:
                 */
                if (start <= key.offset && end < extent_end) {
                        if (extent_type == BTRFS_FILE_EXTENT_INLINE) {
-                               ret = -EINVAL;
+                               ret = -EOPNOTSUPP;
                                break;
                        }
 
@@ -872,7 +872,7 @@ next_slot:
                if (start > key.offset && end >= extent_end) {
                        BUG_ON(del_nr > 0);
                        if (extent_type == BTRFS_FILE_EXTENT_INLINE) {
-                               ret = -EINVAL;
+                               ret = -EOPNOTSUPP;
                                break;
                        }
 
@@ -1777,7 +1777,7 @@ static ssize_t btrfs_file_aio_write(struct kiocb *iocb,
        start_pos = round_down(pos, root->sectorsize);
        if (start_pos > i_size_read(inode)) {
                /* Expand hole size to cover write data, preventing empty gap */
-               end_pos = round_up(pos + iov->iov_len, root->sectorsize);
+               end_pos = round_up(pos + count, root->sectorsize);
                err = btrfs_cont_expand(inode, i_size_read(inode), end_pos);
                if (err) {
                        mutex_unlock(&inode->i_mutex);
index cc8ca193d830f62ec5202933f49d9867b1427969..86935f5ae29162b0b3dec1ed30c34df4c93db921 100644 (file)
@@ -176,7 +176,11 @@ static void start_caching(struct btrfs_root *root)
 
        tsk = kthread_run(caching_kthread, root, "btrfs-ino-cache-%llu\n",
                          root->root_key.objectid);
-       BUG_ON(IS_ERR(tsk)); /* -ENOMEM */
+       if (IS_ERR(tsk)) {
+               btrfs_warn(root->fs_info, "failed to start inode caching task");
+               btrfs_clear_and_info(root, CHANGE_INODE_CACHE,
+                               "disabling inode map caching");
+       }
 }
 
 int btrfs_find_free_ino(struct btrfs_root *root, u64 *objectid)
@@ -205,24 +209,14 @@ again:
 
 void btrfs_return_ino(struct btrfs_root *root, u64 objectid)
 {
-       struct btrfs_free_space_ctl *ctl = root->free_ino_ctl;
        struct btrfs_free_space_ctl *pinned = root->free_ino_pinned;
 
        if (!btrfs_test_opt(root, INODE_MAP_CACHE))
                return;
-
 again:
        if (root->cached == BTRFS_CACHE_FINISHED) {
-               __btrfs_add_free_space(ctl, objectid, 1);
+               __btrfs_add_free_space(pinned, objectid, 1);
        } else {
-               /*
-                * If we are in the process of caching free ino chunks,
-                * to avoid adding the same inode number to the free_ino
-                * tree twice due to cross transaction, we'll leave it
-                * in the pinned tree until a transaction is committed
-                * or the caching work is done.
-                */
-
                down_write(&root->fs_info->commit_root_sem);
                spin_lock(&root->cache_lock);
                if (root->cached == BTRFS_CACHE_FINISHED) {
@@ -234,11 +228,7 @@ again:
 
                start_caching(root);
 
-               if (objectid <= root->cache_progress ||
-                   objectid >= root->highest_objectid)
-                       __btrfs_add_free_space(ctl, objectid, 1);
-               else
-                       __btrfs_add_free_space(pinned, objectid, 1);
+               __btrfs_add_free_space(pinned, objectid, 1);
 
                up_write(&root->fs_info->commit_root_sem);
        }
index e79ff6b90cb71bb131426b97838c369ae0e6f48c..2ad7de94efef71e58af4e747cd05d3cb3aeeba40 100644 (file)
@@ -3066,7 +3066,7 @@ process_slot:
                                                         new_key.offset + datal,
                                                         1);
                                if (ret) {
-                                       if (ret != -EINVAL)
+                                       if (ret != -EOPNOTSUPP)
                                                btrfs_abort_transaction(trans,
                                                                root, ret);
                                        btrfs_end_transaction(trans, root);
@@ -3141,7 +3141,7 @@ process_slot:
                                                         new_key.offset + datal,
                                                         1);
                                if (ret) {
-                                       if (ret != -EINVAL)
+                                       if (ret != -EOPNOTSUPP)
                                                btrfs_abort_transaction(trans,
                                                        root, ret);
                                        btrfs_end_transaction(trans, root);
index 1ac3ca98c4294ae54781f97931278386e8bb4bcc..eb6537a08c1bf4438f0bc90df319977cb964bee7 100644 (file)
@@ -349,6 +349,11 @@ static int fs_path_ensure_buf(struct fs_path *p, int len)
        if (p->buf_len >= len)
                return 0;
 
+       if (len > PATH_MAX) {
+               WARN_ON(1);
+               return -ENOMEM;
+       }
+
        path_len = p->end - p->start;
        old_buf_len = p->buf_len;
 
index 5011aadacab8e4cf1ac291f061f3c6ada24f8b9a..9601d25a46075eaa44c0151ba890213efae8ec29 100644 (file)
@@ -385,20 +385,6 @@ static match_table_t tokens = {
        {Opt_err, NULL},
 };
 
-#define btrfs_set_and_info(root, opt, fmt, args...)                    \
-{                                                                      \
-       if (!btrfs_test_opt(root, opt))                                 \
-               btrfs_info(root->fs_info, fmt, ##args);                 \
-       btrfs_set_opt(root->fs_info->mount_opt, opt);                   \
-}
-
-#define btrfs_clear_and_info(root, opt, fmt, args...)                  \
-{                                                                      \
-       if (btrfs_test_opt(root, opt))                                  \
-               btrfs_info(root->fs_info, fmt, ##args);                 \
-       btrfs_clear_opt(root->fs_info->mount_opt, opt);                 \
-}
-
 /*
  * Regular mount options parser.  Everything that is needed only when
  * reading in a new superblock is parsed here.
@@ -1186,7 +1172,6 @@ static struct dentry *mount_subvol(const char *subvol_name, int flags,
                return ERR_PTR(-ENOMEM);
        mnt = vfs_kern_mount(&btrfs_fs_type, flags, device_name,
                             newargs);
-       kfree(newargs);
 
        if (PTR_RET(mnt) == -EBUSY) {
                if (flags & MS_RDONLY) {
@@ -1196,17 +1181,22 @@ static struct dentry *mount_subvol(const char *subvol_name, int flags,
                        int r;
                        mnt = vfs_kern_mount(&btrfs_fs_type, flags | MS_RDONLY, device_name,
                                             newargs);
-                       if (IS_ERR(mnt))
+                       if (IS_ERR(mnt)) {
+                               kfree(newargs);
                                return ERR_CAST(mnt);
+                       }
 
                        r = btrfs_remount(mnt->mnt_sb, &flags, NULL);
                        if (r < 0) {
                                /* FIXME: release vfsmount mnt ??*/
+                               kfree(newargs);
                                return ERR_PTR(r);
                        }
                }
        }
 
+       kfree(newargs);
+
        if (IS_ERR(mnt))
                return ERR_CAST(mnt);
 
index 2e5e648eb5c3dc3bd82bea5ce8dead051864cf75..c561b628ebce519d111d159f541b9df88242a5b1 100644 (file)
@@ -3261,7 +3261,7 @@ int ceph_encode_inode_release(void **p, struct inode *inode,
                        rel->seq = cpu_to_le32(cap->seq);
                        rel->issue_seq = cpu_to_le32(cap->issue_seq),
                        rel->mseq = cpu_to_le32(cap->mseq);
-                       rel->caps = cpu_to_le32(cap->issued);
+                       rel->caps = cpu_to_le32(cap->implemented);
                        rel->wanted = cpu_to_le32(cap->mds_wanted);
                        rel->dname_len = 0;
                        rel->dname_seq = 0;
index 766410a12c2cb209a224fcfd97f63a055fd20801..c29d6ae6887489c29902bec33c4118d4d807e9dc 100644 (file)
@@ -141,7 +141,7 @@ static int __dcache_readdir(struct file *file,  struct dir_context *ctx,
 
        /* start at beginning? */
        if (ctx->pos == 2 || last == NULL ||
-           ctx->pos < ceph_dentry(last)->offset) {
+           fpos_cmp(ctx->pos, ceph_dentry(last)->offset) < 0) {
                if (list_empty(&parent->d_subdirs))
                        goto out_unlock;
                p = parent->d_subdirs.prev;
@@ -182,9 +182,16 @@ more:
        spin_unlock(&dentry->d_lock);
        spin_unlock(&parent->d_lock);
 
+       /* make sure a dentry wasn't dropped while we didn't have parent lock */
+       if (!ceph_dir_is_complete(dir)) {
+               dout(" lost dir complete on %p; falling back to mds\n", dir);
+               dput(dentry);
+               err = -EAGAIN;
+               goto out;
+       }
+
        dout(" %llu (%llu) dentry %p %.*s %p\n", di->offset, ctx->pos,
             dentry, dentry->d_name.len, dentry->d_name.name, dentry->d_inode);
-       ctx->pos = di->offset;
        if (!dir_emit(ctx, dentry->d_name.name,
                      dentry->d_name.len,
                      ceph_translate_ino(dentry->d_sb, dentry->d_inode->i_ino),
@@ -198,19 +205,12 @@ more:
                return 0;
        }
 
+       ctx->pos = di->offset + 1;
+
        if (last)
                dput(last);
        last = dentry;
 
-       ctx->pos++;
-
-       /* make sure a dentry wasn't dropped while we didn't have parent lock */
-       if (!ceph_dir_is_complete(dir)) {
-               dout(" lost dir complete on %p; falling back to mds\n", dir);
-               err = -EAGAIN;
-               goto out;
-       }
-
        spin_lock(&parent->d_lock);
        p = p->prev;    /* advance to next dentry */
        goto more;
@@ -296,6 +296,8 @@ static int ceph_readdir(struct file *file, struct dir_context *ctx)
                err = __dcache_readdir(file, ctx, shared_gen);
                if (err != -EAGAIN)
                        return err;
+               frag = fpos_frag(ctx->pos);
+               off = fpos_off(ctx->pos);
        } else {
                spin_unlock(&ci->i_ceph_lock);
        }
@@ -446,7 +448,6 @@ more:
        if (atomic_read(&ci->i_release_count) == fi->dir_release_count) {
                dout(" marking %p complete\n", inode);
                __ceph_dir_set_complete(ci, fi->dir_release_count);
-               ci->i_max_offset = ctx->pos;
        }
        spin_unlock(&ci->i_ceph_lock);
 
@@ -935,14 +936,16 @@ static int ceph_rename(struct inode *old_dir, struct dentry *old_dentry,
                 * to do it here.
                 */
 
-               /* d_move screws up d_subdirs order */
-               ceph_dir_clear_complete(new_dir);
-
                d_move(old_dentry, new_dentry);
 
                /* ensure target dentry is invalidated, despite
                   rehashing bug in vfs_rename_dir */
                ceph_invalidate_dentry_lease(new_dentry);
+
+               /* d_move screws up sibling dentries' offsets */
+               ceph_dir_clear_complete(old_dir);
+               ceph_dir_clear_complete(new_dir);
+
        }
        ceph_mdsc_put_request(req);
        return err;
index 39da1c2efa5030216d18bc6bb3020a78afb4c5f6..88a6df4cbe6d8a52bd083a756ac452b798c33708 100644 (file)
@@ -1221,9 +1221,6 @@ static long ceph_fallocate(struct file *file, int mode,
        if (!S_ISREG(inode->i_mode))
                return -EOPNOTSUPP;
 
-       if (IS_SWAPFILE(inode))
-               return -ETXTBSY;
-
        mutex_lock(&inode->i_mutex);
 
        if (ceph_snap(inode) != CEPH_NOSNAP) {
index 0b0728e5be2d7cba589a935159b88f9d26f0b2e9..233c6f96910abc78d2b120e4e30a44ba0009b88a 100644 (file)
@@ -744,7 +744,6 @@ static int fill_inode(struct inode *inode,
            !__ceph_dir_is_complete(ci)) {
                dout(" marking %p complete (empty)\n", inode);
                __ceph_dir_set_complete(ci, atomic_read(&ci->i_release_count));
-               ci->i_max_offset = 2;
        }
 no_change:
        /* only update max_size on auth cap */
@@ -889,41 +888,6 @@ out_unlock:
        return;
 }
 
-/*
- * Set dentry's directory position based on the current dir's max, and
- * order it in d_subdirs, so that dcache_readdir behaves.
- *
- * Always called under directory's i_mutex.
- */
-static void ceph_set_dentry_offset(struct dentry *dn)
-{
-       struct dentry *dir = dn->d_parent;
-       struct inode *inode = dir->d_inode;
-       struct ceph_inode_info *ci;
-       struct ceph_dentry_info *di;
-
-       BUG_ON(!inode);
-
-       ci = ceph_inode(inode);
-       di = ceph_dentry(dn);
-
-       spin_lock(&ci->i_ceph_lock);
-       if (!__ceph_dir_is_complete(ci)) {
-               spin_unlock(&ci->i_ceph_lock);
-               return;
-       }
-       di->offset = ceph_inode(inode)->i_max_offset++;
-       spin_unlock(&ci->i_ceph_lock);
-
-       spin_lock(&dir->d_lock);
-       spin_lock_nested(&dn->d_lock, DENTRY_D_LOCK_NESTED);
-       list_move(&dn->d_u.d_child, &dir->d_subdirs);
-       dout("set_dentry_offset %p %lld (%p %p)\n", dn, di->offset,
-            dn->d_u.d_child.prev, dn->d_u.d_child.next);
-       spin_unlock(&dn->d_lock);
-       spin_unlock(&dir->d_lock);
-}
-
 /*
  * splice a dentry to an inode.
  * caller must hold directory i_mutex for this to be safe.
@@ -933,7 +897,7 @@ static void ceph_set_dentry_offset(struct dentry *dn)
  * the caller) if we fail.
  */
 static struct dentry *splice_dentry(struct dentry *dn, struct inode *in,
-                                   bool *prehash, bool set_offset)
+                                   bool *prehash)
 {
        struct dentry *realdn;
 
@@ -965,8 +929,6 @@ static struct dentry *splice_dentry(struct dentry *dn, struct inode *in,
        }
        if ((!prehash || *prehash) && d_unhashed(dn))
                d_rehash(dn);
-       if (set_offset)
-               ceph_set_dentry_offset(dn);
 out:
        return dn;
 }
@@ -987,7 +949,6 @@ int ceph_fill_trace(struct super_block *sb, struct ceph_mds_request *req,
 {
        struct ceph_mds_reply_info_parsed *rinfo = &req->r_reply_info;
        struct inode *in = NULL;
-       struct ceph_mds_reply_inode *ininfo;
        struct ceph_vino vino;
        struct ceph_fs_client *fsc = ceph_sb_to_client(sb);
        int err = 0;
@@ -1161,6 +1122,9 @@ retry_lookup:
 
                /* rename? */
                if (req->r_old_dentry && req->r_op == CEPH_MDS_OP_RENAME) {
+                       struct inode *olddir = req->r_old_dentry_dir;
+                       BUG_ON(!olddir);
+
                        dout(" src %p '%.*s' dst %p '%.*s'\n",
                             req->r_old_dentry,
                             req->r_old_dentry->d_name.len,
@@ -1180,13 +1144,10 @@ retry_lookup:
                           rehashing bug in vfs_rename_dir */
                        ceph_invalidate_dentry_lease(dn);
 
-                       /*
-                        * d_move() puts the renamed dentry at the end of
-                        * d_subdirs.  We need to assign it an appropriate
-                        * directory offset so we can behave when dir is
-                        * complete.
-                        */
-                       ceph_set_dentry_offset(req->r_old_dentry);
+                       /* d_move screws up sibling dentries' offsets */
+                       ceph_dir_clear_complete(dir);
+                       ceph_dir_clear_complete(olddir);
+
                        dout("dn %p gets new offset %lld\n", req->r_old_dentry,
                             ceph_dentry(req->r_old_dentry)->offset);
 
@@ -1213,8 +1174,9 @@ retry_lookup:
 
                /* attach proper inode */
                if (!dn->d_inode) {
+                       ceph_dir_clear_complete(dir);
                        ihold(in);
-                       dn = splice_dentry(dn, in, &have_lease, true);
+                       dn = splice_dentry(dn, in, &have_lease);
                        if (IS_ERR(dn)) {
                                err = PTR_ERR(dn);
                                goto done;
@@ -1235,17 +1197,16 @@ retry_lookup:
                   (req->r_op == CEPH_MDS_OP_LOOKUPSNAP ||
                    req->r_op == CEPH_MDS_OP_MKSNAP)) {
                struct dentry *dn = req->r_dentry;
+               struct inode *dir = req->r_locked_dir;
 
                /* fill out a snapdir LOOKUPSNAP dentry */
                BUG_ON(!dn);
-               BUG_ON(!req->r_locked_dir);
-               BUG_ON(ceph_snap(req->r_locked_dir) != CEPH_SNAPDIR);
-               ininfo = rinfo->targeti.in;
-               vino.ino = le64_to_cpu(ininfo->ino);
-               vino.snap = le64_to_cpu(ininfo->snapid);
+               BUG_ON(!dir);
+               BUG_ON(ceph_snap(dir) != CEPH_SNAPDIR);
                dout(" linking snapped dir %p to dn %p\n", in, dn);
+               ceph_dir_clear_complete(dir);
                ihold(in);
-               dn = splice_dentry(dn, in, NULL, true);
+               dn = splice_dentry(dn, in, NULL);
                if (IS_ERR(dn)) {
                        err = PTR_ERR(dn);
                        goto done;
@@ -1407,7 +1368,7 @@ retry_lookup:
                }
 
                if (!dn->d_inode) {
-                       dn = splice_dentry(dn, in, NULL, false);
+                       dn = splice_dentry(dn, in, NULL);
                        if (IS_ERR(dn)) {
                                err = PTR_ERR(dn);
                                dn = NULL;
index fdf941b44ff103a2590a3804aa850e468ec980d6..a822a6e58290bbedfb0e363bf3bbb601075891fb 100644 (file)
@@ -109,6 +109,8 @@ static long ceph_ioctl_set_layout(struct file *file, void __user *arg)
                return PTR_ERR(req);
        req->r_inode = inode;
        ihold(inode);
+       req->r_num_caps = 1;
+
        req->r_inode_drop = CEPH_CAP_FILE_SHARED | CEPH_CAP_FILE_EXCL;
 
        req->r_args.setlayout.layout.fl_stripe_unit =
@@ -153,6 +155,7 @@ static long ceph_ioctl_set_layout_policy (struct file *file, void __user *arg)
                return PTR_ERR(req);
        req->r_inode = inode;
        ihold(inode);
+       req->r_num_caps = 1;
 
        req->r_args.setlayout.layout.fl_stripe_unit =
                        cpu_to_le32(l.stripe_unit);
index d94ba0df9f4d195cabf677fcdcd41cc01096c7e7..191398852a2e8927b1ac193b53ceea623829caf1 100644 (file)
@@ -45,6 +45,7 @@ static int ceph_lock_message(u8 lock_type, u16 operation, struct file *file,
                return PTR_ERR(req);
        req->r_inode = inode;
        ihold(inode);
+       req->r_num_caps = 1;
 
        /* mds requires start and length rather than start and end */
        if (LLONG_MAX == fl->fl_end)
index 7866cd05a6bbee4afd2478f3737d0ccd8ac28975..ead05cc1f447562271578131ab25769257080915 100644 (file)
@@ -266,7 +266,6 @@ struct ceph_inode_info {
        struct timespec i_rctime;
        u64 i_rbytes, i_rfiles, i_rsubdirs;
        u64 i_files, i_subdirs;
-       u64 i_max_offset;  /* largest readdir offset, set with complete dir */
 
        struct rb_root i_fragtree;
        struct mutex i_fragtree_mutex;
index ca926ad0430cf715af12ebc3f0fa86d8c273d492..66d3d3c6b4b248878af6d751178d055310224c3d 100644 (file)
@@ -457,9 +457,9 @@ COMPAT_SYSCALL_DEFINE3(fcntl64, unsigned int, fd, unsigned int, cmd,
        case F_GETLK64:
        case F_SETLK64:
        case F_SETLKW64:
-       case F_GETLKP:
-       case F_SETLKP:
-       case F_SETLKPW:
+       case F_OFD_GETLK:
+       case F_OFD_SETLK:
+       case F_OFD_SETLKW:
                ret = get_compat_flock64(&f, compat_ptr(arg));
                if (ret != 0)
                        break;
@@ -468,7 +468,7 @@ COMPAT_SYSCALL_DEFINE3(fcntl64, unsigned int, fd, unsigned int, cmd,
                conv_cmd = convert_fcntl_cmd(cmd);
                ret = sys_fcntl(fd, conv_cmd, (unsigned long)&f);
                set_fs(old_fs);
-               if ((conv_cmd == F_GETLK || conv_cmd == F_GETLKP) && ret == 0) {
+               if ((conv_cmd == F_GETLK || conv_cmd == F_OFD_GETLK) && ret == 0) {
                        /* need to return lock information - see above for commentary */
                        if (f.l_start > COMPAT_LOFF_T_MAX)
                                ret = -EOVERFLOW;
@@ -493,9 +493,9 @@ COMPAT_SYSCALL_DEFINE3(fcntl, unsigned int, fd, unsigned int, cmd,
        case F_GETLK64:
        case F_SETLK64:
        case F_SETLKW64:
-       case F_GETLKP:
-       case F_SETLKP:
-       case F_SETLKPW:
+       case F_OFD_GETLK:
+       case F_OFD_SETLK:
+       case F_OFD_SETLKW:
                return -EINVAL;
        }
        return compat_sys_fcntl64(fd, cmd, arg);
index e3ad709a4232f414b91fe1ed18989a07d0f7aaa3..0b2528fb640e77e4a38a351c51f8a01f12102c14 100644 (file)
@@ -73,10 +73,15 @@ static int expand_corename(struct core_name *cn, int size)
 static int cn_vprintf(struct core_name *cn, const char *fmt, va_list arg)
 {
        int free, need;
+       va_list arg_copy;
 
 again:
        free = cn->size - cn->used;
-       need = vsnprintf(cn->corename + cn->used, free, fmt, arg);
+
+       va_copy(arg_copy, arg);
+       need = vsnprintf(cn->corename + cn->used, free, fmt, arg_copy);
+       va_end(arg_copy);
+
        if (need < free) {
                cn->used += need;
                return 0;
index 6ea7b1436bbc201e872d6ee18f7321b2e099f156..5c56785007e0e36fec78e6535aa210e09247a983 100644 (file)
@@ -667,7 +667,7 @@ ext4_fsblk_t ext4_count_free_clusters(struct super_block *sb)
                        continue;
 
                x = ext4_count_free(bitmap_bh->b_data,
-                                   EXT4_BLOCKS_PER_GROUP(sb) / 8);
+                                   EXT4_CLUSTERS_PER_GROUP(sb) / 8);
                printk(KERN_DEBUG "group %u: stored = %d, counted = %u\n",
                        i, ext4_free_group_clusters(sb, gdp), x);
                bitmap_count += x;
index f1c65dc7cc0ad268a9fccc7b6f1aeaf078d84a0a..66946aa621270716c580a2617bceecbcadb6bda7 100644 (file)
@@ -2466,23 +2466,6 @@ static inline void ext4_update_i_disksize(struct inode *inode, loff_t newsize)
        up_write(&EXT4_I(inode)->i_data_sem);
 }
 
-/*
- * Update i_disksize after writeback has been started. Races with truncate
- * are avoided by checking i_size under i_data_sem.
- */
-static inline void ext4_wb_update_i_disksize(struct inode *inode, loff_t newsize)
-{
-       loff_t i_size;
-
-       down_write(&EXT4_I(inode)->i_data_sem);
-       i_size = i_size_read(inode);
-       if (newsize > i_size)
-               newsize = i_size;
-       if (newsize > EXT4_I(inode)->i_disksize)
-               EXT4_I(inode)->i_disksize = newsize;
-       up_write(&EXT4_I(inode)->i_data_sem);
-}
-
 struct ext4_group_info {
        unsigned long   bb_state;
        struct rb_root  bb_free_root;
index 82df3ce9874ab7f3a65abc10e2bd2238b1ae2af3..01b0c208f62507e12f50ddd4fd3669972797f823 100644 (file)
@@ -3313,6 +3313,11 @@ static int ext4_split_extent(handle_t *handle,
                return PTR_ERR(path);
        depth = ext_depth(inode);
        ex = path[depth].p_ext;
+       if (!ex) {
+               EXT4_ERROR_INODE(inode, "unexpected hole at %lu",
+                                (unsigned long) map->m_lblk);
+               return -EIO;
+       }
        uninitialized = ext4_ext_is_uninitialized(ex);
        split_flag1 = 0;
 
@@ -3694,6 +3699,12 @@ static int ext4_convert_initialized_extents(handle_t *handle,
                }
                depth = ext_depth(inode);
                ex = path[depth].p_ext;
+               if (!ex) {
+                       EXT4_ERROR_INODE(inode, "unexpected hole at %lu",
+                                        (unsigned long) map->m_lblk);
+                       err = -EIO;
+                       goto out;
+               }
        }
 
        err = ext4_ext_get_access(handle, inode, path + depth);
@@ -4730,6 +4741,9 @@ static long ext4_zero_range(struct file *file, loff_t offset,
 
        trace_ext4_zero_range(inode, offset, len, mode);
 
+       if (!S_ISREG(inode->i_mode))
+               return -EINVAL;
+
        /*
         * Write out all dirty pages to avoid race conditions
         * Then release them.
@@ -4878,9 +4892,6 @@ long ext4_fallocate(struct file *file, int mode, loff_t offset, loff_t len)
        if (mode & FALLOC_FL_PUNCH_HOLE)
                return ext4_punch_hole(inode, offset, len);
 
-       if (mode & FALLOC_FL_COLLAPSE_RANGE)
-               return ext4_collapse_range(inode, offset, len);
-
        ret = ext4_convert_inline_data(inode);
        if (ret)
                return ret;
@@ -4892,6 +4903,9 @@ long ext4_fallocate(struct file *file, int mode, loff_t offset, loff_t len)
        if (!(ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS)))
                return -EOPNOTSUPP;
 
+       if (mode & FALLOC_FL_COLLAPSE_RANGE)
+               return ext4_collapse_range(inode, offset, len);
+
        if (mode & FALLOC_FL_ZERO_RANGE)
                return ext4_zero_range(file, offset, len, mode);
 
@@ -5229,18 +5243,19 @@ ext4_ext_shift_path_extents(struct ext4_ext_path *path, ext4_lblk_t shift,
                        if (ex_start == EXT_FIRST_EXTENT(path[depth].p_hdr))
                                update = 1;
 
-                       *start = ex_last->ee_block +
+                       *start = le32_to_cpu(ex_last->ee_block) +
                                ext4_ext_get_actual_len(ex_last);
 
                        while (ex_start <= ex_last) {
-                               ex_start->ee_block -= shift;
-                               if (ex_start >
-                                       EXT_FIRST_EXTENT(path[depth].p_hdr)) {
-                                       if (ext4_ext_try_to_merge_right(inode,
-                                               path, ex_start - 1))
-                                               ex_last--;
-                               }
-                               ex_start++;
+                               le32_add_cpu(&ex_start->ee_block, -shift);
+                               /* Try to merge to the left. */
+                               if ((ex_start >
+                                    EXT_FIRST_EXTENT(path[depth].p_hdr)) &&
+                                   ext4_ext_try_to_merge_right(inode,
+                                                       path, ex_start - 1))
+                                       ex_last--;
+                               else
+                                       ex_start++;
                        }
                        err = ext4_ext_dirty(handle, inode, path + depth);
                        if (err)
@@ -5255,7 +5270,7 @@ ext4_ext_shift_path_extents(struct ext4_ext_path *path, ext4_lblk_t shift,
                if (err)
                        goto out;
 
-               path[depth].p_idx->ei_block -= shift;
+               le32_add_cpu(&path[depth].p_idx->ei_block, -shift);
                err = ext4_ext_dirty(handle, inode, path + depth);
                if (err)
                        goto out;
@@ -5300,7 +5315,8 @@ ext4_ext_shift_extents(struct inode *inode, handle_t *handle,
                return ret;
        }
 
-       stop_block = extent->ee_block + ext4_ext_get_actual_len(extent);
+       stop_block = le32_to_cpu(extent->ee_block) +
+                       ext4_ext_get_actual_len(extent);
        ext4_ext_drop_refs(path);
        kfree(path);
 
@@ -5313,10 +5329,18 @@ ext4_ext_shift_extents(struct inode *inode, handle_t *handle,
         * enough to accomodate the shift.
         */
        path = ext4_ext_find_extent(inode, start - 1, NULL, 0);
+       if (IS_ERR(path))
+               return PTR_ERR(path);
        depth = path->p_depth;
        extent =  path[depth].p_ext;
-       ex_start = extent->ee_block;
-       ex_end = extent->ee_block + ext4_ext_get_actual_len(extent);
+       if (extent) {
+               ex_start = le32_to_cpu(extent->ee_block);
+               ex_end = le32_to_cpu(extent->ee_block) +
+                       ext4_ext_get_actual_len(extent);
+       } else {
+               ex_start = 0;
+               ex_end = 0;
+       }
        ext4_ext_drop_refs(path);
        kfree(path);
 
@@ -5331,7 +5355,13 @@ ext4_ext_shift_extents(struct inode *inode, handle_t *handle,
                        return PTR_ERR(path);
                depth = path->p_depth;
                extent = path[depth].p_ext;
-               current_block = extent->ee_block;
+               if (!extent) {
+                       EXT4_ERROR_INODE(inode, "unexpected hole at %lu",
+                                        (unsigned long) start);
+                       return -EIO;
+               }
+
+               current_block = le32_to_cpu(extent->ee_block);
                if (start > current_block) {
                        /* Hole, move to the next extent */
                        ret = mext_next_extent(inode, path, &extent);
@@ -5365,17 +5395,18 @@ int ext4_collapse_range(struct inode *inode, loff_t offset, loff_t len)
        ext4_lblk_t punch_start, punch_stop;
        handle_t *handle;
        unsigned int credits;
-       loff_t new_size;
+       loff_t new_size, ioffset;
        int ret;
 
-       BUG_ON(offset + len > i_size_read(inode));
-
        /* Collapse range works only on fs block size aligned offsets. */
        if (offset & (EXT4_BLOCK_SIZE(sb) - 1) ||
            len & (EXT4_BLOCK_SIZE(sb) - 1))
                return -EINVAL;
 
        if (!S_ISREG(inode->i_mode))
+               return -EINVAL;
+
+       if (EXT4_SB(inode->i_sb)->s_cluster_ratio > 1)
                return -EOPNOTSUPP;
 
        trace_ext4_collapse_range(inode, offset, len);
@@ -5383,22 +5414,34 @@ int ext4_collapse_range(struct inode *inode, loff_t offset, loff_t len)
        punch_start = offset >> EXT4_BLOCK_SIZE_BITS(sb);
        punch_stop = (offset + len) >> EXT4_BLOCK_SIZE_BITS(sb);
 
+       /* Call ext4_force_commit to flush all data in case of data=journal. */
+       if (ext4_should_journal_data(inode)) {
+               ret = ext4_force_commit(inode->i_sb);
+               if (ret)
+                       return ret;
+       }
+
+       /*
+        * Need to round down offset to be aligned with page size boundary
+        * for page size > block size.
+        */
+       ioffset = round_down(offset, PAGE_SIZE);
+
        /* Write out all dirty pages */
-       ret = filemap_write_and_wait_range(inode->i_mapping, offset, -1);
+       ret = filemap_write_and_wait_range(inode->i_mapping, ioffset,
+                                          LLONG_MAX);
        if (ret)
                return ret;
 
        /* Take mutex lock */
        mutex_lock(&inode->i_mutex);
 
-       /* It's not possible punch hole on append only file */
-       if (IS_APPEND(inode) || IS_IMMUTABLE(inode)) {
-               ret = -EPERM;
-               goto out_mutex;
-       }
-
-       if (IS_SWAPFILE(inode)) {
-               ret = -ETXTBSY;
+       /*
+        * There is no need to overlap collapse range with EOF, in which case
+        * it is effectively a truncate operation
+        */
+       if (offset + len >= i_size_read(inode)) {
+               ret = -EINVAL;
                goto out_mutex;
        }
 
@@ -5408,7 +5451,7 @@ int ext4_collapse_range(struct inode *inode, loff_t offset, loff_t len)
                goto out_mutex;
        }
 
-       truncate_pagecache_range(inode, offset, -1);
+       truncate_pagecache(inode, ioffset);
 
        /* Wait for existing dio to complete */
        ext4_inode_block_unlocked_dio(inode);
@@ -5425,7 +5468,7 @@ int ext4_collapse_range(struct inode *inode, loff_t offset, loff_t len)
        ext4_discard_preallocations(inode);
 
        ret = ext4_es_remove_extent(inode, punch_start,
-                                   EXT_MAX_BLOCKS - punch_start - 1);
+                                   EXT_MAX_BLOCKS - punch_start);
        if (ret) {
                up_write(&EXT4_I(inode)->i_data_sem);
                goto out_stop;
@@ -5436,6 +5479,7 @@ int ext4_collapse_range(struct inode *inode, loff_t offset, loff_t len)
                up_write(&EXT4_I(inode)->i_data_sem);
                goto out_stop;
        }
+       ext4_discard_preallocations(inode);
 
        ret = ext4_ext_shift_extents(inode, handle, punch_stop,
                                     punch_stop - punch_start);
@@ -5445,10 +5489,9 @@ int ext4_collapse_range(struct inode *inode, loff_t offset, loff_t len)
        }
 
        new_size = i_size_read(inode) - len;
-       truncate_setsize(inode, new_size);
+       i_size_write(inode, new_size);
        EXT4_I(inode)->i_disksize = new_size;
 
-       ext4_discard_preallocations(inode);
        up_write(&EXT4_I(inode)->i_data_sem);
        if (IS_SYNC(inode))
                ext4_handle_sync(handle);
index 0a014a7194b28cac95e56f21b59f3776fcf8c9fc..0ebc21204b5184841405f890fa11dd5ae11ef54c 100644 (file)
@@ -810,7 +810,7 @@ retry:
 
                        newes.es_lblk = end + 1;
                        newes.es_len = len2;
-                       block = 0x7FDEADBEEF;
+                       block = 0x7FDEADBEEFULL;
                        if (ext4_es_is_written(&orig_es) ||
                            ext4_es_is_unwritten(&orig_es))
                                block = ext4_es_pblock(&orig_es) +
index ca7502d89fdee07b96585c768854375b207daaf6..063fc1538355972d912553ad6c8e419390f057de 100644 (file)
@@ -82,7 +82,7 @@ ext4_unaligned_aio(struct inode *inode, const struct iovec *iov,
        size_t count = iov_length(iov, nr_segs);
        loff_t final_size = pos + count;
 
-       if (pos >= inode->i_size)
+       if (pos >= i_size_read(inode))
                return 0;
 
        if ((pos & blockmask) || (final_size & blockmask))
index 5b0d2c7d54080dea4080909fe8ec6a74ecf19b56..d7b7462a0e13e11e7131f2b148d1323a3de5c996 100644 (file)
@@ -522,6 +522,10 @@ int ext4_map_blocks(handle_t *handle, struct inode *inode,
        if (unlikely(map->m_len > INT_MAX))
                map->m_len = INT_MAX;
 
+       /* We can handle the block number less than EXT_MAX_BLOCKS */
+       if (unlikely(map->m_lblk >= EXT_MAX_BLOCKS))
+               return -EIO;
+
        /* Lookup extent status tree firstly */
        if (ext4_es_lookup_extent(inode, map->m_lblk, &es)) {
                ext4_es_lru_add(inode);
@@ -2243,13 +2247,23 @@ static int mpage_map_and_submit_extent(handle_t *handle,
                        return err;
        } while (map->m_len);
 
-       /* Update on-disk size after IO is submitted */
+       /*
+        * Update on-disk size after IO is submitted.  Races with
+        * truncate are avoided by checking i_size under i_data_sem.
+        */
        disksize = ((loff_t)mpd->first_page) << PAGE_CACHE_SHIFT;
        if (disksize > EXT4_I(inode)->i_disksize) {
                int err2;
-
-               ext4_wb_update_i_disksize(inode, disksize);
+               loff_t i_size;
+
+               down_write(&EXT4_I(inode)->i_data_sem);
+               i_size = i_size_read(inode);
+               if (disksize > i_size)
+                       disksize = i_size;
+               if (disksize > EXT4_I(inode)->i_disksize)
+                       EXT4_I(inode)->i_disksize = disksize;
                err2 = ext4_mark_inode_dirty(handle, inode);
+               up_write(&EXT4_I(inode)->i_data_sem);
                if (err2)
                        ext4_error(inode->i_sb,
                                   "Failed to mark inode %lu dirty",
@@ -3527,15 +3541,6 @@ int ext4_punch_hole(struct inode *inode, loff_t offset, loff_t length)
        }
 
        mutex_lock(&inode->i_mutex);
-       /* It's not possible punch hole on append only file */
-       if (IS_APPEND(inode) || IS_IMMUTABLE(inode)) {
-               ret = -EPERM;
-               goto out_mutex;
-       }
-       if (IS_SWAPFILE(inode)) {
-               ret = -ETXTBSY;
-               goto out_mutex;
-       }
 
        /* No need to punch hole beyond i_size */
        if (offset >= inode->i_size)
@@ -3616,7 +3621,6 @@ int ext4_punch_hole(struct inode *inode, loff_t offset, loff_t length)
                ret = ext4_free_hole_blocks(handle, inode, first_block,
                                            stop_block);
 
-       ext4_discard_preallocations(inode);
        up_write(&EXT4_I(inode)->i_data_sem);
        if (IS_SYNC(inode))
                ext4_handle_sync(handle);
@@ -4423,21 +4427,20 @@ out_brelse:
  *
  * We are called from a few places:
  *
- * - Within generic_file_write() for O_SYNC files.
+ * - Within generic_file_aio_write() -> generic_write_sync() for O_SYNC files.
  *   Here, there will be no transaction running. We wait for any running
  *   transaction to commit.
  *
- * - Within sys_sync(), kupdate and such.
- *   We wait on commit, if tol to.
+ * - Within flush work (sys_sync(), kupdate and such).
+ *   We wait on commit, if told to.
  *
- * - Within prune_icache() (PF_MEMALLOC == true)
- *   Here we simply return.  We can't afford to block kswapd on the
- *   journal commit.
+ * - Within iput_final() -> write_inode_now()
+ *   We wait on commit, if told to.
  *
  * In all cases it is actually safe for us to return without doing anything,
  * because the inode has been copied into a raw inode buffer in
- * ext4_mark_inode_dirty().  This is a correctness thing for O_SYNC and for
- * knfsd.
+ * ext4_mark_inode_dirty().  This is a correctness thing for WB_SYNC_ALL
+ * writeback.
  *
  * Note that we are absolutely dependent upon all inode dirtiers doing the
  * right thing: they *must* call mark_inode_dirty() after dirtying info in
@@ -4449,15 +4452,15 @@ out_brelse:
  *     stuff();
  *     inode->i_size = expr;
  *
- * is in error because a kswapd-driven write_inode() could occur while
- * `stuff()' is running, and the new i_size will be lost.  Plus the inode
- * will no longer be on the superblock's dirty inode list.
+ * is in error because write_inode() could occur while `stuff()' is running,
+ * and the new i_size will be lost.  Plus the inode will no longer be on the
+ * superblock's dirty inode list.
  */
 int ext4_write_inode(struct inode *inode, struct writeback_control *wbc)
 {
        int err;
 
-       if (current->flags & PF_MEMALLOC)
+       if (WARN_ON_ONCE(current->flags & PF_MEMALLOC))
                return 0;
 
        if (EXT4_SB(inode->i_sb)->s_journal) {
index a888cac76e9c55c34002f930a7bc8a8df53376bf..c8238a26818cd9ef7567d0552a60a461bfd1f76e 100644 (file)
@@ -989,7 +989,7 @@ static int ext4_mb_get_buddy_page_lock(struct super_block *sb,
        poff = block % blocks_per_page;
        page = find_or_create_page(inode->i_mapping, pnum, GFP_NOFS);
        if (!page)
-               return -EIO;
+               return -ENOMEM;
        BUG_ON(page->mapping != inode->i_mapping);
        e4b->bd_bitmap_page = page;
        e4b->bd_bitmap = page_address(page) + (poff * sb->s_blocksize);
@@ -1003,7 +1003,7 @@ static int ext4_mb_get_buddy_page_lock(struct super_block *sb,
        pnum = block / blocks_per_page;
        page = find_or_create_page(inode->i_mapping, pnum, GFP_NOFS);
        if (!page)
-               return -EIO;
+               return -ENOMEM;
        BUG_ON(page->mapping != inode->i_mapping);
        e4b->bd_buddy_page = page;
        return 0;
@@ -1168,7 +1168,11 @@ ext4_mb_load_buddy(struct super_block *sb, ext4_group_t group,
                        unlock_page(page);
                }
        }
-       if (page == NULL || !PageUptodate(page)) {
+       if (page == NULL) {
+               ret = -ENOMEM;
+               goto err;
+       }
+       if (!PageUptodate(page)) {
                ret = -EIO;
                goto err;
        }
@@ -1197,7 +1201,11 @@ ext4_mb_load_buddy(struct super_block *sb, ext4_group_t group,
                        unlock_page(page);
                }
        }
-       if (page == NULL || !PageUptodate(page)) {
+       if (page == NULL) {
+               ret = -ENOMEM;
+               goto err;
+       }
+       if (!PageUptodate(page)) {
                ret = -EIO;
                goto err;
        }
@@ -5008,6 +5016,8 @@ error_return:
  */
 static int ext4_trim_extent(struct super_block *sb, int start, int count,
                             ext4_group_t group, struct ext4_buddy *e4b)
+__releases(bitlock)
+__acquires(bitlock)
 {
        struct ext4_free_extent ex;
        int ret = 0;
index ab95508e3d4018eab92647c6d2308e98524080d1..c18d95b5054081c75e0c7a2fab975976838f9b02 100644 (file)
@@ -308,13 +308,14 @@ static void ext4_end_bio(struct bio *bio, int error)
        if (error) {
                struct inode *inode = io_end->inode;
 
-               ext4_warning(inode->i_sb, "I/O error writing to inode %lu "
+               ext4_warning(inode->i_sb, "I/O error %d writing to inode %lu "
                             "(offset %llu size %ld starting block %llu)",
-                            inode->i_ino,
+                            error, inode->i_ino,
                             (unsigned long long) io_end->offset,
                             (long) io_end->size,
                             (unsigned long long)
                             bi_sector >> (inode->i_blkbits - 9));
+               mapping_set_error(inode->i_mapping, error);
        }
 
        if (io_end->flag & EXT4_IO_END_UNWRITTEN) {
index f3c667091618d8b26e09964dafe2f673a4c6cbd3..6f9e6fadac04e1c8af1d4a98d6258cdbc2f45dea 100644 (file)
@@ -3869,19 +3869,38 @@ static int ext4_fill_super(struct super_block *sb, void *data, int silent)
                        goto failed_mount2;
                }
        }
+
+       /*
+        * set up enough so that it can read an inode,
+        * and create new inode for buddy allocator
+        */
+       sbi->s_gdb_count = db_count;
+       if (!test_opt(sb, NOLOAD) &&
+           EXT4_HAS_COMPAT_FEATURE(sb, EXT4_FEATURE_COMPAT_HAS_JOURNAL))
+               sb->s_op = &ext4_sops;
+       else
+               sb->s_op = &ext4_nojournal_sops;
+
+       ext4_ext_init(sb);
+       err = ext4_mb_init(sb);
+       if (err) {
+               ext4_msg(sb, KERN_ERR, "failed to initialize mballoc (%d)",
+                        err);
+               goto failed_mount2;
+       }
+
        if (!ext4_check_descriptors(sb, &first_not_zeroed)) {
                ext4_msg(sb, KERN_ERR, "group descriptors corrupted!");
-               goto failed_mount2;
+               goto failed_mount2a;
        }
        if (EXT4_HAS_INCOMPAT_FEATURE(sb, EXT4_FEATURE_INCOMPAT_FLEX_BG))
                if (!ext4_fill_flex_info(sb)) {
                        ext4_msg(sb, KERN_ERR,
                               "unable to initialize "
                               "flex_bg meta info!");
-                       goto failed_mount2;
+                       goto failed_mount2a;
                }
 
-       sbi->s_gdb_count = db_count;
        get_random_bytes(&sbi->s_next_generation, sizeof(u32));
        spin_lock_init(&sbi->s_next_gen_lock);
 
@@ -3916,14 +3935,6 @@ static int ext4_fill_super(struct super_block *sb, void *data, int silent)
        sbi->s_stripe = ext4_get_stripe_size(sbi);
        sbi->s_extent_max_zeroout_kb = 32;
 
-       /*
-        * set up enough so that it can read an inode
-        */
-       if (!test_opt(sb, NOLOAD) &&
-           EXT4_HAS_COMPAT_FEATURE(sb, EXT4_FEATURE_COMPAT_HAS_JOURNAL))
-               sb->s_op = &ext4_sops;
-       else
-               sb->s_op = &ext4_nojournal_sops;
        sb->s_export_op = &ext4_export_ops;
        sb->s_xattr = ext4_xattr_handlers;
 #ifdef CONFIG_QUOTA
@@ -4113,21 +4124,13 @@ no_journal:
        if (err) {
                ext4_msg(sb, KERN_ERR, "failed to reserve %llu clusters for "
                         "reserved pool", ext4_calculate_resv_clusters(sb));
-               goto failed_mount4a;
+               goto failed_mount5;
        }
 
        err = ext4_setup_system_zone(sb);
        if (err) {
                ext4_msg(sb, KERN_ERR, "failed to initialize system "
                         "zone (%d)", err);
-               goto failed_mount4a;
-       }
-
-       ext4_ext_init(sb);
-       err = ext4_mb_init(sb);
-       if (err) {
-               ext4_msg(sb, KERN_ERR, "failed to initialize mballoc (%d)",
-                        err);
                goto failed_mount5;
        }
 
@@ -4204,11 +4207,8 @@ failed_mount8:
 failed_mount7:
        ext4_unregister_li_request(sb);
 failed_mount6:
-       ext4_mb_release(sb);
-failed_mount5:
-       ext4_ext_release(sb);
        ext4_release_system_zone(sb);
-failed_mount4a:
+failed_mount5:
        dput(sb->s_root);
        sb->s_root = NULL;
 failed_mount4:
@@ -4232,11 +4232,14 @@ failed_mount3:
        percpu_counter_destroy(&sbi->s_extent_cache_cnt);
        if (sbi->s_mmp_tsk)
                kthread_stop(sbi->s_mmp_tsk);
+failed_mount2a:
+       ext4_mb_release(sb);
 failed_mount2:
        for (i = 0; i < db_count; i++)
                brelse(sbi->s_group_desc[i]);
        ext4_kvfree(sbi->s_group_desc);
 failed_mount:
+       ext4_ext_release(sb);
        if (sbi->s_chksum_driver)
                crypto_free_shash(sbi->s_chksum_driver);
        if (sbi->s_proc) {
index 1f5cf5880718d28c8ca7893f7165807f78101c6b..4eec399ec807bc6733d1a90b8c3d0d205eb795c1 100644 (file)
@@ -520,8 +520,8 @@ static void ext4_xattr_update_super_block(handle_t *handle,
 }
 
 /*
- * Release the xattr block BH: If the reference count is > 1, decrement
- * it; otherwise free the block.
+ * Release the xattr block BH: If the reference count is > 1, decrement it;
+ * otherwise free the block.
  */
 static void
 ext4_xattr_release_block(handle_t *handle, struct inode *inode,
@@ -542,16 +542,31 @@ ext4_xattr_release_block(handle_t *handle, struct inode *inode,
                if (ce)
                        mb_cache_entry_free(ce);
                get_bh(bh);
+               unlock_buffer(bh);
                ext4_free_blocks(handle, inode, bh, 0, 1,
                                 EXT4_FREE_BLOCKS_METADATA |
                                 EXT4_FREE_BLOCKS_FORGET);
-               unlock_buffer(bh);
        } else {
                le32_add_cpu(&BHDR(bh)->h_refcount, -1);
                if (ce)
                        mb_cache_entry_release(ce);
+               /*
+                * Beware of this ugliness: Releasing of xattr block references
+                * from different inodes can race and so we have to protect
+                * from a race where someone else frees the block (and releases
+                * its journal_head) before we are done dirtying the buffer. In
+                * nojournal mode this race is harmless and we actually cannot
+                * call ext4_handle_dirty_xattr_block() with locked buffer as
+                * that function can call sync_dirty_buffer() so for that case
+                * we handle the dirtying after unlocking the buffer.
+                */
+               if (ext4_handle_valid(handle))
+                       error = ext4_handle_dirty_xattr_block(handle, inode,
+                                                             bh);
                unlock_buffer(bh);
-               error = ext4_handle_dirty_xattr_block(handle, inode, bh);
+               if (!ext4_handle_valid(handle))
+                       error = ext4_handle_dirty_xattr_block(handle, inode,
+                                                             bh);
                if (IS_SYNC(inode))
                        ext4_handle_sync(handle);
                dquot_free_block(inode, EXT4_C2B(EXT4_SB(inode->i_sb), 1));
index 9ead1596399a12ef66ecf5f087b59463424ccf5d..72c82f69b01b28594e56bb9518df6f211f0d51a9 100644 (file)
@@ -274,15 +274,15 @@ static long do_fcntl(int fd, unsigned int cmd, unsigned long arg,
                break;
 #if BITS_PER_LONG != 32
        /* 32-bit arches must use fcntl64() */
-       case F_GETLKP:
+       case F_OFD_GETLK:
 #endif
        case F_GETLK:
                err = fcntl_getlk(filp, cmd, (struct flock __user *) arg);
                break;
 #if BITS_PER_LONG != 32
        /* 32-bit arches must use fcntl64() */
-       case F_SETLKP:
-       case F_SETLKPW:
+       case F_OFD_SETLK:
+       case F_OFD_SETLKW:
 #endif
                /* Fallthrough */
        case F_SETLK:
@@ -399,13 +399,13 @@ SYSCALL_DEFINE3(fcntl64, unsigned int, fd, unsigned int, cmd,
        
        switch (cmd) {
        case F_GETLK64:
-       case F_GETLKP:
+       case F_OFD_GETLK:
                err = fcntl_getlk64(f.file, cmd, (struct flock64 __user *) arg);
                break;
        case F_SETLK64:
        case F_SETLKW64:
-       case F_SETLKP:
-       case F_SETLKPW:
+       case F_OFD_SETLK:
+       case F_OFD_SETLKW:
                err = fcntl_setlk64(fd, f.file, cmd,
                                (struct flock64 __user *) arg);
                break;
index 78f3403300afd5d825ab833964a443953d58f995..ac127cd008bfeb268c5be789e339b03538d2597c 100644 (file)
@@ -232,9 +232,6 @@ static int kernfs_link_sibling(struct kernfs_node *kn)
        struct rb_node **node = &kn->parent->dir.children.rb_node;
        struct rb_node *parent = NULL;
 
-       if (kernfs_type(kn) == KERNFS_DIR)
-               kn->parent->dir.subdirs++;
-
        while (*node) {
                struct kernfs_node *pos;
                int result;
@@ -249,9 +246,15 @@ static int kernfs_link_sibling(struct kernfs_node *kn)
                else
                        return -EEXIST;
        }
+
        /* add new node and rebalance the tree */
        rb_link_node(&kn->rb, parent, node);
        rb_insert_color(&kn->rb, &kn->parent->dir.children);
+
+       /* successfully added, account subdir number */
+       if (kernfs_type(kn) == KERNFS_DIR)
+               kn->parent->dir.subdirs++;
+
        return 0;
 }
 
index 8034706a7af87523bfc40e8660f21cc54238563f..e01ea4a14a014b3123dddf6d2ad1ecb9a6381736 100644 (file)
@@ -484,6 +484,8 @@ static int kernfs_fop_mmap(struct file *file, struct vm_area_struct *vma)
 
        ops = kernfs_ops(of->kn);
        rc = ops->mmap(of, vma);
+       if (rc)
+               goto out_put;
 
        /*
         * PowerPC's pci_mmap of legacy_mem uses shmem_zero_setup()
index 13fc7a6d380ae6648945c8956cc53901de2d0ccc..e663aeac579e5d8aaa2596a177114b17f262bd6c 100644 (file)
 #define IS_POSIX(fl)   (fl->fl_flags & FL_POSIX)
 #define IS_FLOCK(fl)   (fl->fl_flags & FL_FLOCK)
 #define IS_LEASE(fl)   (fl->fl_flags & (FL_LEASE|FL_DELEG))
-#define IS_FILE_PVT(fl)        (fl->fl_flags & FL_FILE_PVT)
+#define IS_OFDLCK(fl)  (fl->fl_flags & FL_OFDLCK)
 
 static bool lease_breaking(struct file_lock *fl)
 {
@@ -564,7 +564,7 @@ static void __locks_insert_block(struct file_lock *blocker,
        BUG_ON(!list_empty(&waiter->fl_block));
        waiter->fl_next = blocker;
        list_add_tail(&waiter->fl_block, &blocker->fl_block);
-       if (IS_POSIX(blocker) && !IS_FILE_PVT(blocker))
+       if (IS_POSIX(blocker) && !IS_OFDLCK(blocker))
                locks_insert_global_blocked(waiter);
 }
 
@@ -759,12 +759,12 @@ EXPORT_SYMBOL(posix_test_lock);
  * of tasks (such as posix threads) sharing the same open file table.
  * To handle those cases, we just bail out after a few iterations.
  *
- * For FL_FILE_PVT locks, the owner is the filp, not the files_struct.
+ * For FL_OFDLCK locks, the owner is the filp, not the files_struct.
  * Because the owner is not even nominally tied to a thread of
  * execution, the deadlock detection below can't reasonably work well. Just
  * skip it for those.
  *
- * In principle, we could do a more limited deadlock detection on FL_FILE_PVT
+ * In principle, we could do a more limited deadlock detection on FL_OFDLCK
  * locks that just checks for the case where two tasks are attempting to
  * upgrade from read to write locks on the same inode.
  */
@@ -791,9 +791,9 @@ static int posix_locks_deadlock(struct file_lock *caller_fl,
 
        /*
         * This deadlock detector can't reasonably detect deadlocks with
-        * FL_FILE_PVT locks, since they aren't owned by a process, per-se.
+        * FL_OFDLCK locks, since they aren't owned by a process, per-se.
         */
-       if (IS_FILE_PVT(caller_fl))
+       if (IS_OFDLCK(caller_fl))
                return 0;
 
        while ((block_fl = what_owner_is_waiting_for(block_fl))) {
@@ -1391,11 +1391,10 @@ int __break_lease(struct inode *inode, unsigned int mode, unsigned int type)
 
 restart:
        break_time = flock->fl_break_time;
-       if (break_time != 0) {
+       if (break_time != 0)
                break_time -= jiffies;
-               if (break_time == 0)
-                       break_time++;
-       }
+       if (break_time == 0)
+               break_time++;
        locks_insert_block(flock, new_fl);
        spin_unlock(&inode->i_lock);
        error = wait_event_interruptible_timeout(new_fl->fl_wait,
@@ -1891,7 +1890,7 @@ EXPORT_SYMBOL_GPL(vfs_test_lock);
 
 static int posix_lock_to_flock(struct flock *flock, struct file_lock *fl)
 {
-       flock->l_pid = IS_FILE_PVT(fl) ? -1 : fl->fl_pid;
+       flock->l_pid = IS_OFDLCK(fl) ? -1 : fl->fl_pid;
 #if BITS_PER_LONG == 32
        /*
         * Make sure we can represent the posix lock via
@@ -1913,7 +1912,7 @@ static int posix_lock_to_flock(struct flock *flock, struct file_lock *fl)
 #if BITS_PER_LONG == 32
 static void posix_lock_to_flock64(struct flock64 *flock, struct file_lock *fl)
 {
-       flock->l_pid = IS_FILE_PVT(fl) ? -1 : fl->fl_pid;
+       flock->l_pid = IS_OFDLCK(fl) ? -1 : fl->fl_pid;
        flock->l_start = fl->fl_start;
        flock->l_len = fl->fl_end == OFFSET_MAX ? 0 :
                fl->fl_end - fl->fl_start + 1;
@@ -1942,13 +1941,13 @@ int fcntl_getlk(struct file *filp, unsigned int cmd, struct flock __user *l)
        if (error)
                goto out;
 
-       if (cmd == F_GETLKP) {
+       if (cmd == F_OFD_GETLK) {
                error = -EINVAL;
                if (flock.l_pid != 0)
                        goto out;
 
                cmd = F_GETLK;
-               file_lock.fl_flags |= FL_FILE_PVT;
+               file_lock.fl_flags |= FL_OFDLCK;
                file_lock.fl_owner = (fl_owner_t)filp;
        }
 
@@ -2074,25 +2073,25 @@ again:
 
        /*
         * If the cmd is requesting file-private locks, then set the
-        * FL_FILE_PVT flag and override the owner.
+        * FL_OFDLCK flag and override the owner.
         */
        switch (cmd) {
-       case F_SETLKP:
+       case F_OFD_SETLK:
                error = -EINVAL;
                if (flock.l_pid != 0)
                        goto out;
 
                cmd = F_SETLK;
-               file_lock->fl_flags |= FL_FILE_PVT;
+               file_lock->fl_flags |= FL_OFDLCK;
                file_lock->fl_owner = (fl_owner_t)filp;
                break;
-       case F_SETLKPW:
+       case F_OFD_SETLKW:
                error = -EINVAL;
                if (flock.l_pid != 0)
                        goto out;
 
                cmd = F_SETLKW;
-               file_lock->fl_flags |= FL_FILE_PVT;
+               file_lock->fl_flags |= FL_OFDLCK;
                file_lock->fl_owner = (fl_owner_t)filp;
                /* Fallthrough */
        case F_SETLKW:
@@ -2144,13 +2143,13 @@ int fcntl_getlk64(struct file *filp, unsigned int cmd, struct flock64 __user *l)
        if (error)
                goto out;
 
-       if (cmd == F_GETLKP) {
+       if (cmd == F_OFD_GETLK) {
                error = -EINVAL;
                if (flock.l_pid != 0)
                        goto out;
 
                cmd = F_GETLK64;
-               file_lock.fl_flags |= FL_FILE_PVT;
+               file_lock.fl_flags |= FL_OFDLCK;
                file_lock.fl_owner = (fl_owner_t)filp;
        }
 
@@ -2209,25 +2208,25 @@ again:
 
        /*
         * If the cmd is requesting file-private locks, then set the
-        * FL_FILE_PVT flag and override the owner.
+        * FL_OFDLCK flag and override the owner.
         */
        switch (cmd) {
-       case F_SETLKP:
+       case F_OFD_SETLK:
                error = -EINVAL;
                if (flock.l_pid != 0)
                        goto out;
 
                cmd = F_SETLK64;
-               file_lock->fl_flags |= FL_FILE_PVT;
+               file_lock->fl_flags |= FL_OFDLCK;
                file_lock->fl_owner = (fl_owner_t)filp;
                break;
-       case F_SETLKPW:
+       case F_OFD_SETLKW:
                error = -EINVAL;
                if (flock.l_pid != 0)
                        goto out;
 
                cmd = F_SETLKW64;
-               file_lock->fl_flags |= FL_FILE_PVT;
+               file_lock->fl_flags |= FL_OFDLCK;
                file_lock->fl_owner = (fl_owner_t)filp;
                /* Fallthrough */
        case F_SETLKW64:
@@ -2413,8 +2412,8 @@ static void lock_get_status(struct seq_file *f, struct file_lock *fl,
        if (IS_POSIX(fl)) {
                if (fl->fl_flags & FL_ACCESS)
                        seq_printf(f, "ACCESS");
-               else if (IS_FILE_PVT(fl))
-                       seq_printf(f, "FLPVT ");
+               else if (IS_OFDLCK(fl))
+                       seq_printf(f, "OFDLCK");
                else
                        seq_printf(f, "POSIX ");
 
index 39c8ef875f91b5a93b57c8886b56569d679296a3..2c73cae9899d25007818373e11eaec1c8fdb9436 100644 (file)
@@ -654,9 +654,11 @@ static struct rpc_clnt *create_backchannel_client(struct rpc_create_args *args)
 
 static int setup_callback_client(struct nfs4_client *clp, struct nfs4_cb_conn *conn, struct nfsd4_session *ses)
 {
+       int maxtime = max_cb_time(clp->net);
        struct rpc_timeout      timeparms = {
-               .to_initval     = max_cb_time(clp->net),
+               .to_initval     = maxtime,
                .to_retries     = 0,
+               .to_maxval      = maxtime,
        };
        struct rpc_create_args args = {
                .net            = clp->net,
index 2723c1badd01276f9c1802d6cac210aa40f8f0a3..18881f34737ad89e8259042fd33ee5d6a9c26edc 100644 (file)
@@ -3627,14 +3627,6 @@ nfsd4_encode_operation(struct nfsd4_compoundres *resp, struct nfsd4_op *op)
        /* nfsd4_check_resp_size guarantees enough room for error status */
        if (!op->status)
                op->status = nfsd4_check_resp_size(resp, 0);
-       if (op->status == nfserr_resource && nfsd4_has_session(&resp->cstate)) {
-               struct nfsd4_slot *slot = resp->cstate.slot;
-
-               if (slot->sl_flags & NFSD4_SLOT_CACHETHIS)
-                       op->status = nfserr_rep_too_big_to_cache;
-               else
-                       op->status = nfserr_rep_too_big;
-       }
        if (so) {
                so->so_replay.rp_status = op->status;
                so->so_replay.rp_buflen = (char *)resp->p - (char *)(statp+1);
index 3d30eb1fc95e383e50e91605d3526161bcfdebde..9d64679cec73b00fc4685e23d69374ca122fed09 100644 (file)
--- a/fs/open.c
+++ b/fs/open.c
@@ -254,16 +254,21 @@ int do_fallocate(struct file *file, int mode, loff_t offset, loff_t len)
                return -EBADF;
 
        /*
-        * It's not possible to punch hole or perform collapse range
-        * on append only file
+        * We can only allow pure fallocate on append only files
         */
-       if (mode & (FALLOC_FL_PUNCH_HOLE | FALLOC_FL_COLLAPSE_RANGE)
-           && IS_APPEND(inode))
+       if ((mode & ~FALLOC_FL_KEEP_SIZE) && IS_APPEND(inode))
                return -EPERM;
 
        if (IS_IMMUTABLE(inode))
                return -EPERM;
 
+       /*
+        * We can not allow to do any fallocate operation on an active
+        * swapfile
+        */
+       if (IS_SWAPFILE(inode))
+               ret = -ETXTBSY;
+
        /*
         * Revalidate the write permissions, in case security policy has
         * changed since the files were opened.
@@ -286,14 +291,6 @@ int do_fallocate(struct file *file, int mode, loff_t offset, loff_t len)
        if (((offset + len) > inode->i_sb->s_maxbytes) || ((offset + len) < 0))
                return -EFBIG;
 
-       /*
-        * There is no need to overlap collapse range with EOF, in which case
-        * it is effectively a truncate operation
-        */
-       if ((mode & FALLOC_FL_COLLAPSE_RANGE) &&
-           (offset + len >= i_size_read(inode)))
-               return -EINVAL;
-
        if (!file->f_op->fallocate)
                return -EOPNOTSUPP;
 
index a1266089eca1fc0054065dfc723e697daf5691e6..a81c7b556896115a4afbdea5452523057ccd195f 100644 (file)
@@ -1556,7 +1556,7 @@ static int ubifs_remount_rw(struct ubifs_info *c)
        if (c->space_fixup) {
                err = ubifs_fixup_free_space(c);
                if (err)
-                       return err;
+                       goto out;
        }
 
        err = check_free_space(c);
index 82afdcb33183951350df18d3ce05b3aeecdf3e76..951a2321ee010f35c1d3395c09d0830d74197cfd 100644 (file)
@@ -841,7 +841,15 @@ xfs_file_fallocate(
                        goto out_unlock;
                }
 
-               ASSERT(offset + len < i_size_read(inode));
+               /*
+                * There is no need to overlap collapse range with EOF,
+                * in which case it is effectively a truncate operation
+                */
+               if (offset + len >= i_size_read(inode)) {
+                       error = -EINVAL;
+                       goto out_unlock;
+               }
+
                new_size = i_size_read(inode) - len;
 
                error = xfs_collapse_file_space(ip, offset, len);
index 5a64ca4621f3f650e3c6718137a8ea549198d171..f23174fb9ec4340378df59b5cc89b43ecf342bec 100644 (file)
@@ -93,5 +93,8 @@ static inline unsigned long virt_to_fix(const unsigned long vaddr)
 #define set_fixmap_io(idx, phys) \
        __set_fixmap(idx, phys, FIXMAP_PAGE_IO)
 
+#define set_fixmap_offset_io(idx, phys) \
+       __set_fixmap_offset(idx, phys, FIXMAP_PAGE_IO)
+
 #endif /* __ASSEMBLY__ */
 #endif /* __ASM_GENERIC_FIXMAP_H */
index d3909effd7256ee1334910f72ab57becccff93f9..94f9ea8abcae35af8ca36560403fbd25facb7c65 100644 (file)
@@ -50,11 +50,7 @@ static inline bool has_zero(unsigned long val, unsigned long *data, const struct
 }
 
 #ifndef zero_bytemask
-#ifdef CONFIG_64BIT
-#define zero_bytemask(mask)    (~0ul << fls64(mask))
-#else
-#define zero_bytemask(mask)    (~0ul << fls(mask))
-#endif /* CONFIG_64BIT */
-#endif /* zero_bytemask */
+#define zero_bytemask(mask) (~1ul << __fls(mask))
+#endif
 
 #endif /* _ASM_WORD_AT_A_TIME_H */
index 8c1603b10665d141a2b0cb67e2643c0db7452146..433528ab51611ad684b546f6d9fccaf15de970ca 100644 (file)
@@ -29,7 +29,7 @@
 /* 10 (register bit affects spdif_in and spdif_out) */
 #define TEGRA124_CLK_I2S1 11
 #define TEGRA124_CLK_I2C1 12
-#define TEGRA124_CLK_NDFLASH 13
+/* 13 */
 #define TEGRA124_CLK_SDMMC1 14
 #define TEGRA124_CLK_SDMMC4 15
 /* 16 */
@@ -83,7 +83,7 @@
 
 /* 64 */
 #define TEGRA124_CLK_UARTD 65
-#define TEGRA124_CLK_UARTE 66
+/* 66 */
 #define TEGRA124_CLK_I2C3 67
 #define TEGRA124_CLK_SBC4 68
 #define TEGRA124_CLK_SDMMC3 69
@@ -97,7 +97,7 @@
 #define TEGRA124_CLK_TRACE 77
 #define TEGRA124_CLK_SOC_THERM 78
 #define TEGRA124_CLK_DTV 79
-#define TEGRA124_CLK_NDSPEED 80
+/* 80 */
 #define TEGRA124_CLK_I2CSLOW 81
 #define TEGRA124_CLK_DSIB 82
 #define TEGRA124_CLK_TSEC 83
index 78c6c52073ad62948a7d750951ed94d232957048..a0875001b13c84ad70a9b2909654e9ffb6824c58 100644 (file)
@@ -10,8 +10,8 @@
  *
  */
 
-#ifndef CAN_CORE_H
-#define CAN_CORE_H
+#ifndef _CAN_CORE_H
+#define _CAN_CORE_H
 
 #include <linux/can.h>
 #include <linux/skbuff.h>
@@ -58,4 +58,4 @@ extern void can_rx_unregister(struct net_device *dev, canid_t can_id,
 extern int can_send(struct sk_buff *skb, int loop);
 extern int can_ioctl(struct socket *sock, unsigned int cmd, unsigned long arg);
 
-#endif /* CAN_CORE_H */
+#endif /* !_CAN_CORE_H */
index 3ce5e526525f852f37ea242700363036c85068ca..6992afc6ba7f96fda9dba202f771fe7e28f807fe 100644 (file)
@@ -10,8 +10,8 @@
  *
  */
 
-#ifndef CAN_DEV_H
-#define CAN_DEV_H
+#ifndef _CAN_DEV_H
+#define _CAN_DEV_H
 
 #include <linux/can.h>
 #include <linux/can/netlink.h>
@@ -132,4 +132,4 @@ struct sk_buff *alloc_canfd_skb(struct net_device *dev,
 struct sk_buff *alloc_can_err_skb(struct net_device *dev,
                                  struct can_frame **cf);
 
-#endif /* CAN_DEV_H */
+#endif /* !_CAN_DEV_H */
index 9c1167baf273e7e62cb29dbfb89266e11f1c29d6..e0475c5cbb92aac6fe1163bc2ac65a4664048152 100644 (file)
@@ -6,8 +6,8 @@
  * published by the Free Software Foundation.
  */
 
-#ifndef CAN_LED_H
-#define CAN_LED_H
+#ifndef _CAN_LED_H
+#define _CAN_LED_H
 
 #include <linux/if.h>
 #include <linux/leds.h>
@@ -48,4 +48,4 @@ static inline void can_led_notifier_exit(void)
 
 #endif
 
-#endif
+#endif /* !_CAN_LED_H */
index 7702641f87ee032b76d2cfa68fdd5f75aec4ec2b..78b2d44f04cffc83f7ce7feb4cd08c94cea84ce8 100644 (file)
@@ -1,5 +1,5 @@
-#ifndef _CAN_PLATFORM_CC770_H_
-#define _CAN_PLATFORM_CC770_H_
+#ifndef _CAN_PLATFORM_CC770_H
+#define _CAN_PLATFORM_CC770_H
 
 /* CPU Interface Register (0x02) */
 #define CPUIF_CEN      0x01    /* Clock Out Enable */
@@ -30,4 +30,4 @@ struct cc770_platform_data {
        u8 bcr;         /* Bus Configuration Register */
 };
 
-#endif /* !_CAN_PLATFORM_CC770_H_ */
+#endif /* !_CAN_PLATFORM_CC770_H */
index dc029dba7a030d384d9389038b6f7d600eee3dbf..d44fcae274ff2a0877c06091bfaafe68e0cc8d6d 100644 (file)
@@ -1,5 +1,5 @@
-#ifndef __CAN_PLATFORM_MCP251X_H__
-#define __CAN_PLATFORM_MCP251X_H__
+#ifndef _CAN_PLATFORM_MCP251X_H
+#define _CAN_PLATFORM_MCP251X_H
 
 /*
  *
@@ -18,4 +18,4 @@ struct mcp251x_platform_data {
        unsigned long oscillator_frequency;
 };
 
-#endif /* __CAN_PLATFORM_MCP251X_H__ */
+#endif /* !_CAN_PLATFORM_MCP251X_H */
diff --git a/include/linux/can/platform/rcar_can.h b/include/linux/can/platform/rcar_can.h
new file mode 100644 (file)
index 0000000..0f4a2f3
--- /dev/null
@@ -0,0 +1,17 @@
+#ifndef _CAN_PLATFORM_RCAR_CAN_H_
+#define _CAN_PLATFORM_RCAR_CAN_H_
+
+#include <linux/types.h>
+
+/* Clock Select Register settings */
+enum CLKR {
+       CLKR_CLKP1 = 0, /* Peripheral clock (clkp1) */
+       CLKR_CLKP2 = 1, /* Peripheral clock (clkp2) */
+       CLKR_CLKEXT = 3 /* Externally input clock */
+};
+
+struct rcar_can_platform_data {
+       enum CLKR clock_select; /* Clock source select */
+};
+
+#endif /* !_CAN_PLATFORM_RCAR_CAN_H_ */
index 96f8fcc78d787a3b826967b77328ea6f4e1dc8ef..93570b61ec6c58bfa433e6b4f710fb9a4e466121 100644 (file)
@@ -1,5 +1,5 @@
-#ifndef _CAN_PLATFORM_SJA1000_H_
-#define _CAN_PLATFORM_SJA1000_H_
+#ifndef _CAN_PLATFORM_SJA1000_H
+#define _CAN_PLATFORM_SJA1000_H
 
 /* clock divider register */
 #define CDR_CLKOUT_MASK 0x07
@@ -32,4 +32,4 @@ struct sja1000_platform_data {
        u8 cdr;         /* clock divider register */
 };
 
-#endif /* !_CAN_PLATFORM_SJA1000_H_ */
+#endif /* !_CAN_PLATFORM_SJA1000_H */
index af17cb3f7a8402bdd3816911abb9f98ebf60632f..a52f47ca6c8ad9c5159c34b4f568b5d84eccbd18 100644 (file)
@@ -1,5 +1,5 @@
-#ifndef __CAN_PLATFORM_TI_HECC_H__
-#define __CAN_PLATFORM_TI_HECC_H__
+#ifndef _CAN_PLATFORM_TI_HECC_H
+#define _CAN_PLATFORM_TI_HECC_H
 
 /*
  * TI HECC (High End CAN Controller) driver platform header
@@ -41,4 +41,4 @@ struct ti_hecc_platform_data {
        u32 version;
        void (*transceiver_switch) (int);
 };
-#endif
+#endif /* !_CAN_PLATFORM_TI_HECC_H */
index f9bbbb472663af08aef78ac152e8293f36736ea4..cc00d15c6107be8893b024e3eabb2ef1ba243016 100644 (file)
@@ -7,8 +7,8 @@
  *
  */
 
-#ifndef CAN_SKB_H
-#define CAN_SKB_H
+#ifndef _CAN_SKB_H
+#define _CAN_SKB_H
 
 #include <linux/types.h>
 #include <linux/skbuff.h>
@@ -80,4 +80,4 @@ static inline struct sk_buff *can_create_echo_skb(struct sk_buff *skb)
        return skb;
 }
 
-#endif /* CAN_SKB_H */
+#endif /* !_CAN_SKB_H */
index 0a114d05f68d35bd275924a7290a6c3b75ef17fd..212f537fc686a8384c7618e97f5e02cdea5f524c 100644 (file)
@@ -154,13 +154,23 @@ static inline u32 ethtool_rxfh_indir_default(u32 index, u32 n_rx_rings)
  * @reset: Reset (part of) the device, as specified by a bitmask of
  *     flags from &enum ethtool_reset_flags.  Returns a negative
  *     error code or zero.
+ * @get_rxfh_key_size: Get the size of the RX flow hash key.
+ *     Returns zero if not supported for this specific device.
  * @get_rxfh_indir_size: Get the size of the RX flow hash indirection table.
  *     Returns zero if not supported for this specific device.
  * @get_rxfh_indir: Get the contents of the RX flow hash indirection table.
  *     Will not be called if @get_rxfh_indir_size returns zero.
+ * @get_rxfh: Get the contents of the RX flow hash indirection table and hash
+ *     key.
+ *     Will not be called if @get_rxfh_indir_size and @get_rxfh_key_size
+ *     returns zero.
  *     Returns a negative error code or zero.
  * @set_rxfh_indir: Set the contents of the RX flow hash indirection table.
  *     Will not be called if @get_rxfh_indir_size returns zero.
+ * @set_rxfh: Set the contents of the RX flow hash indirection table and
+ *     hash key.
+ *     Will not be called if @get_rxfh_indir_size and @get_rxfh_key_size
+ *     returns zero.
  *     Returns a negative error code or zero.
  * @get_channels: Get number of channels.
  * @set_channels: Set number of channels.  Returns a negative error code or
@@ -232,7 +242,10 @@ struct ethtool_ops {
        int     (*set_rxnfc)(struct net_device *, struct ethtool_rxnfc *);
        int     (*flash_device)(struct net_device *, struct ethtool_flash *);
        int     (*reset)(struct net_device *, u32 *);
+       u32     (*get_rxfh_key_size)(struct net_device *);
        u32     (*get_rxfh_indir_size)(struct net_device *);
+       int     (*get_rxfh)(struct net_device *, u32 *, u8 *);
+       int     (*set_rxfh)(struct net_device *, u32 *, u8 *);
        int     (*get_rxfh_indir)(struct net_device *, u32 *);
        int     (*set_rxfh_indir)(struct net_device *, const u32 *);
        void    (*get_channels)(struct net_device *, struct ethtool_channels *);
index 024fd03e5d182d5670ee2c60005cbea43f8a83e8..7977b3958e25321435fc5fad48c66ace242215a7 100644 (file)
 #define BPF_CALL       0x80    /* function call */
 #define BPF_EXIT       0x90    /* function return */
 
+/* Placeholder/dummy for 0 */
+#define BPF_0          0
+
+/* Register numbers */
+enum {
+       BPF_REG_0 = 0,
+       BPF_REG_1,
+       BPF_REG_2,
+       BPF_REG_3,
+       BPF_REG_4,
+       BPF_REG_5,
+       BPF_REG_6,
+       BPF_REG_7,
+       BPF_REG_8,
+       BPF_REG_9,
+       BPF_REG_10,
+       __MAX_BPF_REG,
+};
+
 /* BPF has 10 general purpose 64-bit registers and stack frame. */
-#define MAX_BPF_REG    11
+#define MAX_BPF_REG    __MAX_BPF_REG
+
+/* ArgX, context and stack frame pointer register positions. Note,
+ * Arg1, Arg2, Arg3, etc are used as argument mappings of function
+ * calls in BPF_CALL instruction.
+ */
+#define BPF_REG_ARG1   BPF_REG_1
+#define BPF_REG_ARG2   BPF_REG_2
+#define BPF_REG_ARG3   BPF_REG_3
+#define BPF_REG_ARG4   BPF_REG_4
+#define BPF_REG_ARG5   BPF_REG_5
+#define BPF_REG_CTX    BPF_REG_6
+#define BPF_REG_FP     BPF_REG_10
+
+/* Additional register mappings for converted user programs. */
+#define BPF_REG_A      BPF_REG_0
+#define BPF_REG_X      BPF_REG_7
+#define BPF_REG_TMP    BPF_REG_8
 
 /* BPF program can access up to 512 bytes of stack space. */
 #define MAX_BPF_STACK  512
 
-/* Arg1, context and stack frame pointer register positions. */
-#define ARG1_REG       1
-#define CTX_REG                6
-#define FP_REG         10
+/* bpf_add|sub|...: a += x, bpf_mov: a = x */
+#define BPF_ALU64_REG(op, a, x) \
+       ((struct sock_filter_int) {BPF_ALU64|BPF_OP(op)|BPF_X, a, x, 0, 0})
+#define BPF_ALU32_REG(op, a, x) \
+       ((struct sock_filter_int) {BPF_ALU|BPF_OP(op)|BPF_X, a, x, 0, 0})
+
+/* bpf_add|sub|...: a += imm, bpf_mov: a = imm */
+#define BPF_ALU64_IMM(op, a, imm) \
+       ((struct sock_filter_int) {BPF_ALU64|BPF_OP(op)|BPF_K, a, 0, 0, imm})
+#define BPF_ALU32_IMM(op, a, imm) \
+       ((struct sock_filter_int) {BPF_ALU|BPF_OP(op)|BPF_K, a, 0, 0, imm})
+
+/* R0 = *(uint *) (skb->data + off) */
+#define BPF_LD_ABS(size, off) \
+       ((struct sock_filter_int) {BPF_LD|BPF_SIZE(size)|BPF_ABS, 0, 0, 0, off})
+
+/* R0 = *(uint *) (skb->data + x + off) */
+#define BPF_LD_IND(size, x, off) \
+       ((struct sock_filter_int) {BPF_LD|BPF_SIZE(size)|BPF_IND, 0, x, 0, off})
+
+/* a = *(uint *) (x + off) */
+#define BPF_LDX_MEM(sz, a, x, off) \
+       ((struct sock_filter_int) {BPF_LDX|BPF_SIZE(sz)|BPF_MEM, a, x, off, 0})
+
+/* if (a 'op' x) goto pc+off */
+#define BPF_JMP_REG(op, a, x, off) \
+       ((struct sock_filter_int) {BPF_JMP|BPF_OP(op)|BPF_X, a, x, off, 0})
+
+/* if (a 'op' imm) goto pc+off */
+#define BPF_JMP_IMM(op, a, imm, off) \
+       ((struct sock_filter_int) {BPF_JMP|BPF_OP(op)|BPF_K, a, 0, off, imm})
+
+#define BPF_EXIT_INSN() \
+       ((struct sock_filter_int) {BPF_JMP|BPF_EXIT, 0, 0, 0, 0})
+
+static inline int size_to_bpf(int size)
+{
+       switch (size) {
+       case 1:
+               return BPF_B;
+       case 2:
+               return BPF_H;
+       case 4:
+               return BPF_W;
+       case 8:
+               return BPF_DW;
+       default:
+               return -EINVAL;
+       }
+}
+
+/* Macro to invoke filter function. */
+#define SK_RUN_FILTER(filter, ctx)  (*filter->bpf_func)(ctx, filter->insnsi)
 
 struct sock_filter_int {
        __u8    code;           /* opcode */
@@ -97,15 +182,10 @@ static inline unsigned int sk_filter_size(unsigned int proglen)
 #define sk_filter_proglen(fprog)                       \
                (fprog->len * sizeof(fprog->filter[0]))
 
-#define SK_RUN_FILTER(filter, ctx)                     \
-               (*filter->bpf_func)(ctx, filter->insnsi)
-
 int sk_filter(struct sock *sk, struct sk_buff *skb);
 
-u32 sk_run_filter_int_seccomp(const struct seccomp_data *ctx,
-                             const struct sock_filter_int *insni);
-u32 sk_run_filter_int_skb(const struct sk_buff *ctx,
-                         const struct sock_filter_int *insni);
+void sk_filter_select_runtime(struct sk_filter *fp);
+void sk_filter_free(struct sk_filter *fp);
 
 int sk_convert_filter(struct sock_filter *prog, int len,
                      struct sock_filter_int *new_prog, int *new_len);
@@ -125,6 +205,9 @@ void sk_decode_filter(struct sock_filter *filt, struct sock_filter *to);
 void sk_filter_charge(struct sock *sk, struct sk_filter *fp);
 void sk_filter_uncharge(struct sock *sk, struct sk_filter *fp);
 
+u64 __bpf_call_base(u64 r1, u64 r2, u64 r3, u64 r4, u64 r5);
+void bpf_int_jit_compile(struct sk_filter *fp);
+
 #ifdef CONFIG_BPF_JIT
 #include <stdarg.h>
 #include <linux/linkage.h>
@@ -223,6 +306,7 @@ enum {
        BPF_S_ANC_VLAN_TAG,
        BPF_S_ANC_VLAN_TAG_PRESENT,
        BPF_S_ANC_PAY_OFFSET,
+       BPF_S_ANC_RANDOM,
 };
 
 #endif /* __LINUX_FILTER_H__ */
index 7a9c5bca2b7694f5496dbcf793eea2920fd37af9..878031227c57a0b41be7cde3070ee90f1921b570 100644 (file)
@@ -815,7 +815,7 @@ static inline struct file *get_file(struct file *f)
 #define FL_SLEEP       128     /* A blocking lock */
 #define FL_DOWNGRADE_PENDING   256 /* Lease is being downgraded */
 #define FL_UNLOCK_PENDING      512 /* Lease is being broken */
-#define FL_FILE_PVT    1024    /* lock is private to the file */
+#define FL_OFDLCK      1024    /* lock is "owned" by struct file */
 
 /*
  * Special return value from posix_lock_file() and vfs_lock_file() for
index 9212b017bc7236cfc63afe5c268cc741995a1af4..ae9504b4b67d3026cd9c1c1fcd30a8cfc928c984 100644 (file)
@@ -535,6 +535,7 @@ static inline int ftrace_modify_call(struct dyn_ftrace *rec, unsigned long old_a
 extern int ftrace_arch_read_dyn_info(char *buf, int size);
 
 extern int skip_trace(unsigned long ip);
+extern void ftrace_module_init(struct module *mod);
 
 extern void ftrace_disable_daemon(void);
 extern void ftrace_enable_daemon(void);
@@ -544,6 +545,7 @@ static inline int ftrace_force_update(void) { return 0; }
 static inline void ftrace_disable_daemon(void) { }
 static inline void ftrace_enable_daemon(void) { }
 static inline void ftrace_release_mod(struct module *mod) {}
+static inline void ftrace_module_init(struct module *mod) {}
 static inline __init int register_ftrace_command(struct ftrace_func_command *cmd)
 {
        return -EINVAL;
index 13bbbde00e68de454c8cf30f0581796d55eb0a40..8c0fb7f3a9a50aed2432f97122fd78a8f4b0718a 100644 (file)
@@ -106,7 +106,7 @@ struct vlan_pcpu_stats {
 
 #if defined(CONFIG_VLAN_8021Q) || defined(CONFIG_VLAN_8021Q_MODULE)
 
-extern struct net_device *__vlan_find_dev_deep(struct net_device *real_dev,
+extern struct net_device *__vlan_find_dev_deep_rcu(struct net_device *real_dev,
                                               __be16 vlan_proto, u16 vlan_id);
 extern struct net_device *vlan_dev_real_dev(const struct net_device *dev);
 extern u16 vlan_dev_vlan_id(const struct net_device *dev);
@@ -199,7 +199,7 @@ extern void vlan_vids_del_by_dev(struct net_device *dev,
 extern bool vlan_uses_dev(const struct net_device *dev);
 #else
 static inline struct net_device *
-__vlan_find_dev_deep(struct net_device *real_dev,
+__vlan_find_dev_deep_rcu(struct net_device *real_dev,
                     __be16 vlan_proto, u16 vlan_id)
 {
        return NULL;
index c7bfac1c4a7b8f6c82742b4d9f97c058131ae4fc..97ac926c78a707fb6bf45293d00e8f4d86515f43 100644 (file)
@@ -203,7 +203,40 @@ static inline int check_wakeup_irqs(void) { return 0; }
 
 extern cpumask_var_t irq_default_affinity;
 
-extern int irq_set_affinity(unsigned int irq, const struct cpumask *cpumask);
+/* Internal implementation. Use the helpers below */
+extern int __irq_set_affinity(unsigned int irq, const struct cpumask *cpumask,
+                             bool force);
+
+/**
+ * irq_set_affinity - Set the irq affinity of a given irq
+ * @irq:       Interrupt to set affinity
+ * @cpumask:   cpumask
+ *
+ * Fails if cpumask does not contain an online CPU
+ */
+static inline int
+irq_set_affinity(unsigned int irq, const struct cpumask *cpumask)
+{
+       return __irq_set_affinity(irq, cpumask, false);
+}
+
+/**
+ * irq_force_affinity - Force the irq affinity of a given irq
+ * @irq:       Interrupt to set affinity
+ * @cpumask:   cpumask
+ *
+ * Same as irq_set_affinity, but without checking the mask against
+ * online cpus.
+ *
+ * Solely for low level cpu hotplug code, where we need to make per
+ * cpu interrupts affine before the cpu becomes online.
+ */
+static inline int
+irq_force_affinity(unsigned int irq, const struct cpumask *cpumask)
+{
+       return __irq_set_affinity(irq, cpumask, true);
+}
+
 extern int irq_can_set_affinity(unsigned int irq);
 extern int irq_select_affinity(unsigned int irq);
 
index d278838908cbc3f1cdac08ae3f3714934f01f188..5c57efb863d08e5937a36e06778efa8047060156 100644 (file)
@@ -394,7 +394,8 @@ extern void remove_percpu_irq(unsigned int irq, struct irqaction *act);
 
 extern void irq_cpu_online(void);
 extern void irq_cpu_offline(void);
-extern int __irq_set_affinity_locked(struct irq_data *data,  const struct cpumask *cpumask);
+extern int irq_set_affinity_locked(struct irq_data *data,
+                                  const struct cpumask *cpumask, bool force);
 
 #if defined(CONFIG_SMP) && defined(CONFIG_GENERIC_PENDING_IRQ)
 void irq_move_irq(struct irq_data *data);
@@ -602,6 +603,8 @@ static inline u32 irq_get_trigger_type(unsigned int irq)
        return d ? irqd_get_trigger_type(d) : 0;
 }
 
+unsigned int arch_dynirq_lower_bound(unsigned int from);
+
 int __irq_alloc_descs(int irq, unsigned int from, unsigned int cnt, int node,
                struct module *owner);
 
index 1de36be64df4d1516a2a451901733d36fd32ec4e..5ab4e3a76721760e4d5a70f88de282a687858990 100644 (file)
@@ -822,6 +822,7 @@ struct ata_port {
        unsigned long           qc_allocated;
        unsigned int            qc_active;
        int                     nr_active_links; /* #links with active qcs */
+       unsigned int            last_tag;       /* track next tag hw expects */
 
        struct ata_link         link;           /* host default link */
        struct ata_link         *slave_link;    /* see ata_slave_link_init() */
index ba87bd21295a533c8d6941bc4c11d29bceba0112..c0468e6f0442a9d92be3509e3228b9bb4e078d06 100644 (file)
@@ -577,6 +577,9 @@ struct mlx4_cq {
 
        u32                     cons_index;
 
+       u16                     irq;
+       bool                    irq_affinity_change;
+
        __be32                 *set_ci_db;
        __be32                 *arm_db;
        int                     arm_sn;
index a803d792df1e6c9c6d3a83ed2ae81a0b9265cae0..f4ad247fd324f2184314364a3e5a532d8d19781c 100644 (file)
@@ -56,9 +56,6 @@ struct device;
 struct phy_device;
 /* 802.11 specific */
 struct wireless_dev;
-                                       /* source back-compat hooks */
-#define SET_ETHTOOL_OPS(netdev,ops) \
-       ( (netdev)->ethtool_ops = (ops) )
 
 void netdev_set_default_ethtool_ops(struct net_device *dev,
                                    const struct ethtool_ops *ops);
@@ -3156,6 +3153,20 @@ const char *netdev_drivername(const struct net_device *dev);
 
 void linkwatch_run_queue(void);
 
+static inline netdev_features_t netdev_intersect_features(netdev_features_t f1,
+                                                         netdev_features_t f2)
+{
+       if (f1 & NETIF_F_GEN_CSUM)
+               f1 |= (NETIF_F_ALL_CSUM & ~NETIF_F_GEN_CSUM);
+       if (f2 & NETIF_F_GEN_CSUM)
+               f2 |= (NETIF_F_ALL_CSUM & ~NETIF_F_GEN_CSUM);
+       f1 &= f2;
+       if (f1 & NETIF_F_GEN_CSUM)
+               f1 &= ~(NETIF_F_ALL_CSUM & ~NETIF_F_GEN_CSUM);
+
+       return f1;
+}
+
 static inline netdev_features_t netdev_get_wanted_features(
        struct net_device *dev)
 {
@@ -3181,12 +3192,7 @@ void netdev_change_features(struct net_device *dev);
 void netif_stacked_transfer_operstate(const struct net_device *rootdev,
                                        struct net_device *dev);
 
-netdev_features_t netif_skb_dev_features(struct sk_buff *skb,
-                                        const struct net_device *dev);
-static inline netdev_features_t netif_skb_features(struct sk_buff *skb)
-{
-       return netif_skb_dev_features(skb, skb->dev);
-}
+netdev_features_t netif_skb_features(struct sk_buff *skb);
 
 static inline bool net_gso_ok(netdev_features_t features, int gso_type)
 {
index aad8eeaf416d4bbf6d7ae6a4b40a27d4c346f1c4..7a28115dd3965a7f64487378f5661bda8592b33c 100644 (file)
@@ -45,7 +45,8 @@ struct netlink_kernel_cfg {
        unsigned int    flags;
        void            (*input)(struct sk_buff *skb);
        struct mutex    *cb_mutex;
-       void            (*bind)(int group);
+       int             (*bind)(int group);
+       void            (*unbind)(int group);
        bool            (*compare)(struct net *net, struct sock *sk);
 };
 
@@ -169,4 +170,11 @@ struct netlink_tap {
 extern int netlink_add_tap(struct netlink_tap *nt);
 extern int netlink_remove_tap(struct netlink_tap *nt);
 
+bool __netlink_ns_capable(const struct netlink_skb_parms *nsp,
+                         struct user_namespace *ns, int cap);
+bool netlink_ns_capable(const struct sk_buff *skb,
+                       struct user_namespace *ns, int cap);
+bool netlink_capable(const struct sk_buff *skb, int cap);
+bool netlink_net_capable(const struct sk_buff *skb, int cap);
+
 #endif /* __LINUX_NETLINK_H */
index c8d7f3965fff913a55f158a53b7bc0944c1d4b61..20163b9a0eae70cfdfba688bab5dc08eed5fcfdb 100644 (file)
@@ -80,6 +80,22 @@ enum {
 
        IEEE802154_ATTR_FRAME_RETRIES,
 
+       IEEE802154_ATTR_LLSEC_ENABLED,
+       IEEE802154_ATTR_LLSEC_SECLEVEL,
+       IEEE802154_ATTR_LLSEC_KEY_MODE,
+       IEEE802154_ATTR_LLSEC_KEY_SOURCE_SHORT,
+       IEEE802154_ATTR_LLSEC_KEY_SOURCE_EXTENDED,
+       IEEE802154_ATTR_LLSEC_KEY_ID,
+       IEEE802154_ATTR_LLSEC_FRAME_COUNTER,
+       IEEE802154_ATTR_LLSEC_KEY_BYTES,
+       IEEE802154_ATTR_LLSEC_KEY_USAGE_FRAME_TYPES,
+       IEEE802154_ATTR_LLSEC_KEY_USAGE_COMMANDS,
+       IEEE802154_ATTR_LLSEC_FRAME_TYPE,
+       IEEE802154_ATTR_LLSEC_CMD_FRAME_ID,
+       IEEE802154_ATTR_LLSEC_SECLEVELS,
+       IEEE802154_ATTR_LLSEC_DEV_OVERRIDE,
+       IEEE802154_ATTR_LLSEC_DEV_KEY_MODE,
+
        __IEEE802154_ATTR_MAX,
 };
 
@@ -134,6 +150,21 @@ enum {
 
        IEEE802154_SET_MACPARAMS,
 
+       IEEE802154_LLSEC_GETPARAMS,
+       IEEE802154_LLSEC_SETPARAMS,
+       IEEE802154_LLSEC_LIST_KEY,
+       IEEE802154_LLSEC_ADD_KEY,
+       IEEE802154_LLSEC_DEL_KEY,
+       IEEE802154_LLSEC_LIST_DEV,
+       IEEE802154_LLSEC_ADD_DEV,
+       IEEE802154_LLSEC_DEL_DEV,
+       IEEE802154_LLSEC_LIST_DEVKEY,
+       IEEE802154_LLSEC_ADD_DEVKEY,
+       IEEE802154_LLSEC_DEL_DEVKEY,
+       IEEE802154_LLSEC_LIST_SECLEVEL,
+       IEEE802154_LLSEC_ADD_SECLEVEL,
+       IEEE802154_LLSEC_DEL_SECLEVEL,
+
        __IEEE802154_CMD_MAX,
 };
 
index 3f23b4472c3150990237a566ba2d46acb1d1aa82..6404253d810d7482a64fa9e959e99c8bad05c912 100644 (file)
@@ -44,11 +44,16 @@ extern void of_irq_init(const struct of_device_id *matches);
 
 #ifdef CONFIG_OF_IRQ
 extern int of_irq_count(struct device_node *dev);
+extern int of_irq_get(struct device_node *dev, int index);
 #else
 static inline int of_irq_count(struct device_node *dev)
 {
        return 0;
 }
+static inline int of_irq_get(struct device_node *dev, int index)
+{
+       return 0;
+}
 #endif
 
 #if defined(CONFIG_OF)
index 6fe8464ed767f0dac6481a6ffc7b39ecaebdd648..d449018d07265200f4d8d6eeaf8ddac1a9010ebd 100644 (file)
@@ -22,16 +22,18 @@ extern struct phy_device *of_phy_connect(struct net_device *dev,
 struct phy_device *of_phy_attach(struct net_device *dev,
                                 struct device_node *phy_np, u32 flags,
                                 phy_interface_t iface);
-extern struct phy_device *of_phy_connect_fixed_link(struct net_device *dev,
-                                        void (*hndlr)(struct net_device *),
-                                        phy_interface_t iface);
 
 extern struct mii_bus *of_mdio_find_bus(struct device_node *mdio_np);
 
 #else /* CONFIG_OF */
 static inline int of_mdiobus_register(struct mii_bus *mdio, struct device_node *np)
 {
-       return -ENOSYS;
+       /*
+        * Fall back to the non-DT function to register a bus.
+        * This way, we don't have to keep compat bits around in drivers.
+        */
+
+       return mdiobus_register(mdio);
 }
 
 static inline struct phy_device *of_phy_find_device(struct device_node *phy_np)
@@ -54,17 +56,25 @@ static inline struct phy_device *of_phy_attach(struct net_device *dev,
        return NULL;
 }
 
-static inline struct phy_device *of_phy_connect_fixed_link(struct net_device *dev,
-                                                          void (*hndlr)(struct net_device *),
-                                                          phy_interface_t iface)
+static inline struct mii_bus *of_mdio_find_bus(struct device_node *mdio_np)
 {
        return NULL;
 }
+#endif /* CONFIG_OF */
 
-static inline struct mii_bus *of_mdio_find_bus(struct device_node *mdio_np)
+#if defined(CONFIG_OF) && defined(CONFIG_FIXED_PHY)
+extern int of_phy_register_fixed_link(struct device_node *np);
+extern bool of_phy_is_fixed_link(struct device_node *np);
+#else
+static inline int of_phy_register_fixed_link(struct device_node *np)
 {
-       return NULL;
+       return -ENOSYS;
 }
-#endif /* CONFIG_OF */
+static inline bool of_phy_is_fixed_link(struct device_node *np)
+{
+       return false;
+}
+#endif
+
 
 #endif /* __LINUX_OF_MDIO_H */
index 51d15f684e7e0b0259b2183c83097ec77baf541e..864ddafad8cc2f0697b181d9c113ca02ef715613 100644 (file)
@@ -198,6 +198,13 @@ static inline struct mii_bus *mdiobus_alloc(void)
 int mdiobus_register(struct mii_bus *bus);
 void mdiobus_unregister(struct mii_bus *bus);
 void mdiobus_free(struct mii_bus *bus);
+struct mii_bus *devm_mdiobus_alloc_size(struct device *dev, int sizeof_priv);
+static inline struct mii_bus *devm_mdiobus_alloc(struct device *dev)
+{
+       return devm_mdiobus_alloc_size(dev, 0);
+}
+
+void devm_mdiobus_free(struct device *dev, struct mii_bus *bus);
 struct phy_device *mdiobus_scan(struct mii_bus *bus, int addr);
 int mdiobus_read(struct mii_bus *bus, int addr, u32 regnum);
 int mdiobus_write(struct mii_bus *bus, int addr, u32 regnum, u16 val);
index e2f5ca96cddc521fcc62a868039c8361f4e48997..2760744cb2a75ff59e3d172a6dd864ef42b32106 100644 (file)
@@ -174,21 +174,29 @@ void devm_of_phy_provider_unregister(struct device *dev,
 #else
 static inline int phy_pm_runtime_get(struct phy *phy)
 {
+       if (!phy)
+               return 0;
        return -ENOSYS;
 }
 
 static inline int phy_pm_runtime_get_sync(struct phy *phy)
 {
+       if (!phy)
+               return 0;
        return -ENOSYS;
 }
 
 static inline int phy_pm_runtime_put(struct phy *phy)
 {
+       if (!phy)
+               return 0;
        return -ENOSYS;
 }
 
 static inline int phy_pm_runtime_put_sync(struct phy *phy)
 {
+       if (!phy)
+               return 0;
        return -ENOSYS;
 }
 
@@ -204,21 +212,29 @@ static inline void phy_pm_runtime_forbid(struct phy *phy)
 
 static inline int phy_init(struct phy *phy)
 {
+       if (!phy)
+               return 0;
        return -ENOSYS;
 }
 
 static inline int phy_exit(struct phy *phy)
 {
+       if (!phy)
+               return 0;
        return -ENOSYS;
 }
 
 static inline int phy_power_on(struct phy *phy)
 {
+       if (!phy)
+               return 0;
        return -ENOSYS;
 }
 
 static inline int phy_power_off(struct phy *phy)
 {
+       if (!phy)
+               return 0;
        return -ENOSYS;
 }
 
index 509d8f5f984e3985cffe178b8a335498f30bcf50..4f2478b4713651a7dd19e17d19d589b658f52470 100644 (file)
@@ -9,15 +9,26 @@ struct fixed_phy_status {
        int asym_pause;
 };
 
+struct device_node;
+
 #ifdef CONFIG_FIXED_PHY
 extern int fixed_phy_add(unsigned int irq, int phy_id,
                         struct fixed_phy_status *status);
+extern int fixed_phy_register(unsigned int irq,
+                             struct fixed_phy_status *status,
+                             struct device_node *np);
 #else
 static inline int fixed_phy_add(unsigned int irq, int phy_id,
                                struct fixed_phy_status *status)
 {
        return -ENODEV;
 }
+static inline int fixed_phy_register(unsigned int irq,
+                                    struct fixed_phy_status *status,
+                                    struct device_node *np)
+{
+       return -ENODEV;
+}
 #endif /* CONFIG_FIXED_PHY */
 
 /*
index e530681bea7049cfa818e66acea0bf863952d281..1a4a8c157b31a7eb8cf3e9c1bedba8ab64354542 100644 (file)
@@ -258,14 +258,14 @@ regulator_get_exclusive(struct device *dev, const char *id)
 static inline struct regulator *__must_check
 regulator_get_optional(struct device *dev, const char *id)
 {
-       return NULL;
+       return ERR_PTR(-ENODEV);
 }
 
 
 static inline struct regulator *__must_check
 devm_regulator_get_optional(struct device *dev, const char *id)
 {
-       return NULL;
+       return ERR_PTR(-ENODEV);
 }
 
 static inline void regulator_put(struct regulator *regulator)
index 36aac733840afc1b71deee284cfaf78b973aa673..9f779c7a2da467c7945cff496e7609a3a16bd71a 100644 (file)
@@ -23,6 +23,7 @@ struct serio {
 
        char name[32];
        char phys[32];
+       char firmware_id[128];
 
        bool manual_bind;
 
index 08074a8101646d0c438415979124b3a5ff8283bb..7a9beeb1c458fb1b4beeb09b62a549d417187cdf 100644 (file)
@@ -426,7 +426,7 @@ static inline u32 skb_mstamp_us_delta(const struct skb_mstamp *t1,
  *     @csum_start: Offset from skb->head where checksumming should start
  *     @csum_offset: Offset from csum_start where checksum should be stored
  *     @priority: Packet queueing priority
- *     @local_df: allow local fragmentation
+ *     @ignore_df: allow local fragmentation
  *     @cloned: Head may be cloned (check refcnt to be sure)
  *     @ip_summed: Driver fed us an IP checksum
  *     @nohdr: Payload reference only, must not modify header
@@ -514,7 +514,7 @@ struct sk_buff {
        };
        __u32                   priority;
        kmemcheck_bitfield_begin(flags1);
-       __u8                    local_df:1,
+       __u8                    ignore_df:1,
                                cloned:1,
                                ip_summed:2,
                                nohdr:1,
@@ -2741,6 +2741,99 @@ static inline __sum16 skb_checksum_complete(struct sk_buff *skb)
               0 : __skb_checksum_complete(skb);
 }
 
+/* Check if we need to perform checksum complete validation.
+ *
+ * Returns true if checksum complete is needed, false otherwise
+ * (either checksum is unnecessary or zero checksum is allowed).
+ */
+static inline bool __skb_checksum_validate_needed(struct sk_buff *skb,
+                                                 bool zero_okay,
+                                                 __sum16 check)
+{
+       if (skb_csum_unnecessary(skb)) {
+               return false;
+       } else if (zero_okay && !check) {
+               skb->ip_summed = CHECKSUM_UNNECESSARY;
+               return false;
+       }
+
+       return true;
+}
+
+/* For small packets <= CHECKSUM_BREAK perform checksum complete directly
+ * in checksum_init.
+ */
+#define CHECKSUM_BREAK 76
+
+/* Validate (init) checksum based on checksum complete.
+ *
+ * Return values:
+ *   0: checksum is validated or try to in skb_checksum_complete. In the latter
+ *     case the ip_summed will not be CHECKSUM_UNNECESSARY and the pseudo
+ *     checksum is stored in skb->csum for use in __skb_checksum_complete
+ *   non-zero: value of invalid checksum
+ *
+ */
+static inline __sum16 __skb_checksum_validate_complete(struct sk_buff *skb,
+                                                      bool complete,
+                                                      __wsum psum)
+{
+       if (skb->ip_summed == CHECKSUM_COMPLETE) {
+               if (!csum_fold(csum_add(psum, skb->csum))) {
+                       skb->ip_summed = CHECKSUM_UNNECESSARY;
+                       return 0;
+               }
+       }
+
+       skb->csum = psum;
+
+       if (complete || skb->len <= CHECKSUM_BREAK)
+               return __skb_checksum_complete(skb);
+
+       return 0;
+}
+
+static inline __wsum null_compute_pseudo(struct sk_buff *skb, int proto)
+{
+       return 0;
+}
+
+/* Perform checksum validation (init). Note that this is a macro since we only
+ * want to invoke the supplied pseudo-header compute function when necessary.
+ * First we try to validate without any computation (checksum unnecessary) and
+ * then calculate based on checksum complete calling the function to compute
+ * pseudo header.
+ *
+ * Return values:
+ *   0: checksum is validated or try to in skb_checksum_complete
+ *   non-zero: value of invalid checksum
+ */
+#define __skb_checksum_validate(skb, proto, complete,                  \
+                               zero_okay, check, compute_pseudo)       \
+({                                                                     \
+       __sum16 __ret = 0;                                              \
+       if (__skb_checksum_validate_needed(skb, zero_okay, check))      \
+               __ret = __skb_checksum_validate_complete(skb,           \
+                               complete, compute_pseudo(skb, proto));  \
+       __ret;                                                          \
+})
+
+#define skb_checksum_init(skb, proto, compute_pseudo)                  \
+       __skb_checksum_validate(skb, proto, false, false, 0, compute_pseudo)
+
+#define skb_checksum_init_zero_check(skb, proto, check, compute_pseudo)        \
+       __skb_checksum_validate(skb, proto, false, true, check, compute_pseudo)
+
+#define skb_checksum_validate(skb, proto, compute_pseudo)              \
+       __skb_checksum_validate(skb, proto, true, false, 0, compute_pseudo)
+
+#define skb_checksum_validate_zero_check(skb, proto, check,            \
+                                        compute_pseudo)                \
+       __skb_checksum_validate(skb, proto, true, true, check, compute_pseudo)
+
+#define skb_checksum_simple_validate(skb)                              \
+       __skb_checksum_validate(skb, 0, true, false, 0, null_compute_pseudo)
+
 #if defined(CONFIG_NF_CONNTRACK) || defined(CONFIG_NF_CONNTRACK_MODULE)
 void nf_conntrack_destroy(struct nf_conntrack *nfct);
 static inline void nf_conntrack_put(struct nf_conntrack *nfct)
index 54f91d35e5fd76f13b94d8297b10be6642e07887..46cca4c06848346ca84753ac182526a4514ff277 100644 (file)
@@ -23,7 +23,7 @@ int sock_diag_check_cookie(void *sk, __u32 *cookie);
 void sock_diag_save_cookie(void *sk, __u32 *cookie);
 
 int sock_diag_put_meminfo(struct sock *sk, struct sk_buff *skb, int attr);
-int sock_diag_put_filterinfo(struct user_namespace *user_ns, struct sock *sk,
+int sock_diag_put_filterinfo(bool may_report_filterinfo, struct sock *sk,
                             struct sk_buff *skb, int attrtype);
 
 #endif
index aa327a8105ada0d2502d697c6f66699e8f8c516b..b2b1afbb32024ebbb80be196ea276a7ff53ae43e 100644 (file)
@@ -26,20 +26,6 @@ struct at86rf230_platform_data {
        int rstn;
        int slp_tr;
        int dig2;
-
-       /* Setting the irq_type will configure the driver to request
-        * the platform irq trigger type according to the given value
-        * and configure the interrupt polarity of the device to the
-        * corresponding polarity.
-        *
-        * Allowed values are: IRQF_TRIGGER_RISING, IRQF_TRIGGER_FALLING,
-        *                     IRQF_TRIGGER_HIGH and IRQF_TRIGGER_LOW
-        *
-        * Setting it to 0, the driver does not touch the trigger type
-        * configuration of the interrupt and sets the interrupt polarity
-        * of the device to high active (the default value).
-        */
-       int irq_type;
 };
 
 #endif
index 239946868142cec2893e89259555d3b86884d616..a0513210798fc9027af01dccccc5ff6c677d3d7e 100644 (file)
@@ -197,7 +197,8 @@ struct tcp_sock {
        u8      do_early_retrans:1,/* Enable RFC5827 early-retransmit  */
                syn_data:1,     /* SYN includes data */
                syn_fastopen:1, /* SYN includes Fast Open option */
-               syn_data_acked:1;/* data in SYN is acked by SYN-ACK */
+               syn_data_acked:1,/* data in SYN is acked by SYN-ACK */
+               is_cwnd_limited:1;/* forward progress limited by snd_cwnd? */
        u32     tlp_high_seq;   /* snd_nxt at the time of TLP retransmit. */
 
 /* RTT measurement */
@@ -209,6 +210,8 @@ struct tcp_sock {
 
        u32     packets_out;    /* Packets which are "in flight"        */
        u32     retrans_out;    /* Retransmitted packets out            */
+       u32     max_packets_out;  /* max packets_out in last window */
+       u32     max_packets_seq;  /* right edge of max_packets_out flight */
 
        u16     urg_data;       /* Saved octet of OOB data and control flags */
        u8      ecn_flags;      /* ECN status bits.                     */
@@ -365,11 +368,6 @@ static inline bool tcp_passive_fastopen(const struct sock *sk)
                tcp_sk(sk)->fastopen_rsk != NULL);
 }
 
-static inline bool fastopen_cookie_present(struct tcp_fastopen_cookie *foc)
-{
-       return foc->len != -1;
-}
-
 extern void tcp_sock_destruct(struct sock *sk);
 
 static inline int fastopen_init_queue(struct sock *sk, int backlog)
index 44b38b92236a5834f022185e99707ea9ac6edc39..7c9b484735c533ed4243be050d8ad2e8c67d581a 100644 (file)
 #define        CDC_NCM_NTB_MAX_SIZE_TX                 32768   /* bytes */
 #define        CDC_NCM_NTB_MAX_SIZE_RX                 32768   /* bytes */
 
+/* Initial NTB length */
+#define        CDC_NCM_NTB_DEF_SIZE_TX                 16384   /* bytes */
+#define        CDC_NCM_NTB_DEF_SIZE_RX                 16384   /* bytes */
+
 /* Minimum value for MaxDatagramSize, ch. 6.2.9 */
 #define        CDC_NCM_MIN_DATAGRAM_SIZE               1514    /* bytes */
 
 /* Restart the timer, if amount of datagrams is less than given value */
 #define        CDC_NCM_RESTART_TIMER_DATAGRAM_CNT      3
 #define        CDC_NCM_TIMER_PENDING_CNT               2
-#define CDC_NCM_TIMER_INTERVAL                 (400UL * NSEC_PER_USEC)
-
-/* The following macro defines the minimum header space */
-#define        CDC_NCM_MIN_HDR_SIZE \
-       (sizeof(struct usb_cdc_ncm_nth16) + sizeof(struct usb_cdc_ncm_ndp16) + \
-       (CDC_NCM_DPT_DATAGRAMS_MAX + 1) * sizeof(struct usb_cdc_ncm_dpe16))
-
-#define CDC_NCM_NDP_SIZE \
-       (sizeof(struct usb_cdc_ncm_ndp16) +                             \
-             (CDC_NCM_DPT_DATAGRAMS_MAX + 1) * sizeof(struct usb_cdc_ncm_dpe16))
+#define CDC_NCM_TIMER_INTERVAL_USEC            400UL
+#define CDC_NCM_TIMER_INTERVAL_MIN             5UL
+#define CDC_NCM_TIMER_INTERVAL_MAX             (U32_MAX / NSEC_PER_USEC)
 
 #define cdc_ncm_comm_intf_is_mbim(x)  ((x)->desc.bInterfaceSubClass == USB_CDC_SUBCLASS_MBIM && \
                                       (x)->desc.bInterfaceProtocol == USB_CDC_PROTO_NONE)
@@ -107,6 +104,9 @@ struct cdc_ncm_ctx {
        spinlock_t mtx;
        atomic_t stop;
 
+       u32 timer_interval;
+       u32 max_ndp_size;
+
        u32 tx_timer_pending;
        u32 tx_curr_frame_num;
        u32 rx_max;
@@ -118,10 +118,21 @@ struct cdc_ncm_ctx {
        u16 tx_ndp_modulus;
        u16 tx_seq;
        u16 rx_seq;
-       u16 connected;
+       u16 min_tx_pkt;
+
+       /* statistics */
+       u32 tx_curr_frame_payload;
+       u32 tx_reason_ntb_full;
+       u32 tx_reason_ndp_full;
+       u32 tx_reason_timeout;
+       u32 tx_reason_max_datagram;
+       u64 tx_overhead;
+       u64 tx_ntbs;
+       u64 rx_overhead;
+       u64 rx_ntbs;
 };
 
-u8 cdc_ncm_select_altsetting(struct usbnet *dev, struct usb_interface *intf);
+u8 cdc_ncm_select_altsetting(struct usb_interface *intf);
 int cdc_ncm_bind_common(struct usbnet *dev, struct usb_interface *intf, u8 data_altsetting);
 void cdc_ncm_unbind(struct usbnet *dev, struct usb_interface *intf);
 struct sk_buff *cdc_ncm_fill_tx_frame(struct usbnet *dev, struct sk_buff *skb, __le32 sign);
index 933a9f22a05ff63abb4379f94cb907c95f6beac4..f679877bb6017dd4a6da7d1fe0e6ea217ba3b3e4 100644 (file)
@@ -306,11 +306,6 @@ static inline void addrconf_addr_solict_mult(const struct in6_addr *addr,
                      htonl(0xFF000000) | addr->s6_addr32[3]);
 }
 
-static inline bool ipv6_addr_is_multicast(const struct in6_addr *addr)
-{
-       return (addr->s6_addr32[0] & htonl(0xFF000000)) == htonl(0xFF000000);
-}
-
 static inline bool ipv6_addr_is_ll_all_nodes(const struct in6_addr *addr)
 {
 #if defined(CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS) && BITS_PER_LONG == 64
index f79ae2aa76d6a45fb9a1e452d53f82d05e101717..085940f7eeec0e56d5f95226034b32b906bc3380 100644 (file)
@@ -57,6 +57,14 @@ struct sockaddr_ieee802154 {
 /* get/setsockopt */
 #define SOL_IEEE802154 0
 
-#define WPAN_WANTACK   0
+#define WPAN_WANTACK           0
+#define WPAN_SECURITY          1
+#define WPAN_SECURITY_LEVEL    2
+
+#define WPAN_SECURITY_DEFAULT  0
+#define WPAN_SECURITY_OFF      1
+#define WPAN_SECURITY_ON       2
+
+#define WPAN_SECURITY_LEVEL_DEFAULT    (-1)
 
 #endif
index 7d64d3609ec97ea76751009e82d43c38a10ab5d8..4282778694006034bccca4ce3fbeba9be29537ec 100644 (file)
@@ -155,7 +155,11 @@ struct vsock_transport {
 
 /**** CORE ****/
 
-int vsock_core_init(const struct vsock_transport *t);
+int __vsock_core_init(const struct vsock_transport *t, struct module *owner);
+static inline int vsock_core_init(const struct vsock_transport *t)
+{
+       return __vsock_core_init(t, THIS_MODULE);
+}
 void vsock_core_exit(void);
 
 /**** UTILS ****/
index be150cf8cd432298d47860268a1d4566af74481d..4261a67682c032f84da321987e30ee97f510ffa8 100644 (file)
@@ -367,6 +367,7 @@ enum {
 #define HCI_ERROR_REMOTE_POWER_OFF     0x15
 #define HCI_ERROR_LOCAL_HOST_TERM      0x16
 #define HCI_ERROR_PAIRING_NOT_ALLOWED  0x18
+#define HCI_ERROR_ADVERTISING_TIMEOUT  0x3c
 
 /* Flow control modes */
 #define HCI_FLOW_CTL_MODE_PACKET_BASED 0x00
index 5f8bc05694ac665159bb25f7a9eaa704c3784e91..d73f41855ada2cbb7ce9dfc61c72cd9b592083ca 100644 (file)
@@ -68,6 +68,11 @@ struct discovery_state {
        struct list_head        unknown;        /* Name state not known */
        struct list_head        resolve;        /* Name needs to be resolved */
        __u32                   timestamp;
+       bdaddr_t                last_adv_addr;
+       u8                      last_adv_addr_type;
+       s8                      last_adv_rssi;
+       u8                      last_adv_data[HCI_MAX_AD_LENGTH];
+       u8                      last_adv_data_len;
 };
 
 struct hci_conn_hash {
@@ -194,6 +199,7 @@ struct hci_dev {
        __u16           le_scan_window;
        __u16           le_conn_min_interval;
        __u16           le_conn_max_interval;
+       __u16           discov_interleaved_timeout;
        __u8            ssp_debug_mode;
 
        __u16           devid_source;
@@ -1204,8 +1210,8 @@ void hci_sock_dev_event(struct hci_dev *hdev, int event);
  */
 #define DISCOV_LE_SCAN_WIN             0x12
 #define DISCOV_LE_SCAN_INT             0x12
-#define DISCOV_LE_TIMEOUT              msecs_to_jiffies(10240)
-#define DISCOV_INTERLEAVED_TIMEOUT     msecs_to_jiffies(5120)
+#define DISCOV_LE_TIMEOUT              10240   /* msec */
+#define DISCOV_INTERLEAVED_TIMEOUT     5120    /* msec */
 #define DISCOV_INTERLEAVED_INQUIRY_LEN 0x04
 #define DISCOV_BREDR_INQUIRY_LEN       0x08
 
@@ -1265,7 +1271,8 @@ void mgmt_read_local_oob_data_complete(struct hci_dev *hdev, u8 *hash192,
                                       u8 *randomizer256, u8 status);
 void mgmt_device_found(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 link_type,
                       u8 addr_type, u8 *dev_class, s8 rssi, u8 cfm_name,
-                      u8 ssp, u8 *eir, u16 eir_len);
+                      u8 ssp, u8 *eir, u16 eir_len, u8 *scan_rsp,
+                      u8 scan_rsp_len);
 void mgmt_remote_name(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 link_type,
                      u8 addr_type, s8 rssi, u8 *name, u8 name_len);
 void mgmt_discovering(struct hci_dev *hdev, u8 discovering);
index f3539a15c41103b743c0571913fbb93dc402f40a..f2c3186555192cf7ee57f9ace0399bf1adfd1a6f 100644 (file)
@@ -109,6 +109,13 @@ enum ieee80211_band {
  *     channel as the control or any of the secondary channels.
  *     This may be due to the driver or due to regulatory bandwidth
  *     restrictions.
+ * @IEEE80211_CHAN_INDOOR_ONLY: see %NL80211_FREQUENCY_ATTR_INDOOR_ONLY
+ * @IEEE80211_CHAN_GO_CONCURRENT: see %NL80211_FREQUENCY_ATTR_GO_CONCURRENT
+ * @IEEE80211_CHAN_NO_20MHZ: 20 MHz bandwidth is not permitted
+ *     on this channel.
+ * @IEEE80211_CHAN_NO_10MHZ: 10 MHz bandwidth is not permitted
+ *     on this channel.
+ *
  */
 enum ieee80211_channel_flags {
        IEEE80211_CHAN_DISABLED         = 1<<0,
@@ -120,6 +127,10 @@ enum ieee80211_channel_flags {
        IEEE80211_CHAN_NO_OFDM          = 1<<6,
        IEEE80211_CHAN_NO_80MHZ         = 1<<7,
        IEEE80211_CHAN_NO_160MHZ        = 1<<8,
+       IEEE80211_CHAN_INDOOR_ONLY      = 1<<9,
+       IEEE80211_CHAN_GO_CONCURRENT    = 1<<10,
+       IEEE80211_CHAN_NO_20MHZ         = 1<<11,
+       IEEE80211_CHAN_NO_10MHZ         = 1<<12,
 };
 
 #define IEEE80211_CHAN_NO_HT40 \
@@ -441,10 +452,13 @@ bool cfg80211_chandef_usable(struct wiphy *wiphy,
  * cfg80211_chandef_dfs_required - checks if radar detection is required
  * @wiphy: the wiphy to validate against
  * @chandef: the channel definition to check
- * Return: 1 if radar detection is required, 0 if it is not, < 0 on error
+ * @iftype: the interface type as specified in &enum nl80211_iftype
+ * Returns:
+ *     1 if radar detection is required, 0 if it is not, < 0 on error
  */
 int cfg80211_chandef_dfs_required(struct wiphy *wiphy,
-                                 const struct cfg80211_chan_def *chandef);
+                                 const struct cfg80211_chan_def *chandef,
+                                 enum nl80211_iftype);
 
 /**
  * ieee80211_chandef_rate_flags - returns rate flags for a channel
@@ -654,7 +668,6 @@ struct cfg80211_acl_data {
  * @p2p_opp_ps: P2P opportunistic PS
  * @acl: ACL configuration used by the drivers which has support for
  *     MAC address based access control
- * @radar_required: set if radar detection is required
  */
 struct cfg80211_ap_settings {
        struct cfg80211_chan_def chandef;
@@ -672,7 +685,6 @@ struct cfg80211_ap_settings {
        u8 p2p_ctwindow;
        bool p2p_opp_ps;
        const struct cfg80211_acl_data *acl;
-       bool radar_required;
 };
 
 /**
@@ -2278,6 +2290,10 @@ struct cfg80211_qos_map {
  * @channel_switch: initiate channel-switch procedure (with CSA)
  *
  * @set_qos_map: Set QoS mapping information to the driver
+ *
+ * @set_ap_chanwidth: Set the AP (including P2P GO) mode channel width for the
+ *     given interface. This is used e.g. for dynamic HT 20/40 MHz channel width
+ *     changes during the lifetime of the BSS.
  */
 struct cfg80211_ops {
        int     (*suspend)(struct wiphy *wiphy, struct cfg80211_wowlan *wow);
@@ -2521,9 +2537,13 @@ struct cfg80211_ops {
        int     (*channel_switch)(struct wiphy *wiphy,
                                  struct net_device *dev,
                                  struct cfg80211_csa_settings *params);
+
        int     (*set_qos_map)(struct wiphy *wiphy,
                               struct net_device *dev,
                               struct cfg80211_qos_map *qos_map);
+
+       int     (*set_ap_chanwidth)(struct wiphy *wiphy, struct net_device *dev,
+                                   struct cfg80211_chan_def *chandef);
 };
 
 /*
@@ -3194,6 +3214,7 @@ struct cfg80211_cached_keys;
  * @ibss_dfs_possible: (private) IBSS may change to a DFS channel
  * @event_list: (private) list for internal event processing
  * @event_lock: (private) lock for event list
+ * @owner_nlportid: (private) owner socket port ID
  */
 struct wireless_dev {
        struct wiphy *wiphy;
@@ -3241,6 +3262,8 @@ struct wireless_dev {
        unsigned long cac_start_time;
        unsigned int cac_time_ms;
 
+       u32 owner_nlportid;
+
 #ifdef CONFIG_CFG80211_WEXT
        /* wext data */
        struct {
@@ -3600,7 +3623,7 @@ int regulatory_hint(struct wiphy *wiphy, const char *alpha2);
  * default channel settings will be disregarded. If no rule is found for a
  * channel on the regulatory domain the channel will be disabled.
  * Drivers using this for a wiphy should also set the wiphy flag
- * WIPHY_FLAG_CUSTOM_REGULATORY or cfg80211 will set it for the wiphy
+ * REGULATORY_CUSTOM_REG or cfg80211 will set it for the wiphy
  * that called this helper.
  */
 void wiphy_apply_custom_regulatory(struct wiphy *wiphy,
@@ -3668,6 +3691,18 @@ void cfg80211_sched_scan_results(struct wiphy *wiphy);
  */
 void cfg80211_sched_scan_stopped(struct wiphy *wiphy);
 
+/**
+ * cfg80211_sched_scan_stopped_rtnl - notify that the scheduled scan has stopped
+ *
+ * @wiphy: the wiphy on which the scheduled scan stopped
+ *
+ * The driver can call this function to inform cfg80211 that the
+ * scheduled scan had to be stopped, for whatever reason.  The driver
+ * is then called back via the sched_scan_stop operation when done.
+ * This function should be called with rtnl locked.
+ */
+void cfg80211_sched_scan_stopped_rtnl(struct wiphy *wiphy);
+
 /**
  * cfg80211_inform_bss_width_frame - inform cfg80211 of a received BSS frame
  *
@@ -4531,12 +4566,14 @@ void cfg80211_report_obss_beacon(struct wiphy *wiphy,
  * cfg80211_reg_can_beacon - check if beaconing is allowed
  * @wiphy: the wiphy
  * @chandef: the channel definition
+ * @iftype: interface type
  *
  * Return: %true if there is no secondary channel or the secondary channel(s)
  * can be used for beaconing (i.e. is not a radar channel etc.)
  */
 bool cfg80211_reg_can_beacon(struct wiphy *wiphy,
-                            struct cfg80211_chan_def *chandef);
+                            struct cfg80211_chan_def *chandef,
+                            enum nl80211_iftype iftype);
 
 /*
  * cfg80211_ch_switch_notify - update wdev channel and notify userspace
@@ -4682,6 +4719,55 @@ void cfg80211_crit_proto_stopped(struct wireless_dev *wdev, gfp_t gfp);
  */
 unsigned int ieee80211_get_num_supported_channels(struct wiphy *wiphy);
 
+/**
+ * cfg80211_check_combinations - check interface combinations
+ *
+ * @wiphy: the wiphy
+ * @num_different_channels: the number of different channels we want
+ *     to use for verification
+ * @radar_detect: a bitmap where each bit corresponds to a channel
+ *     width where radar detection is needed, as in the definition of
+ *     &struct ieee80211_iface_combination.@radar_detect_widths
+ * @iftype_num: array with the numbers of interfaces of each interface
+ *     type.  The index is the interface type as specified in &enum
+ *     nl80211_iftype.
+ *
+ * This function can be called by the driver to check whether a
+ * combination of interfaces and their types are allowed according to
+ * the interface combinations.
+ */
+int cfg80211_check_combinations(struct wiphy *wiphy,
+                               const int num_different_channels,
+                               const u8 radar_detect,
+                               const int iftype_num[NUM_NL80211_IFTYPES]);
+
+/**
+ * cfg80211_iter_combinations - iterate over matching combinations
+ *
+ * @wiphy: the wiphy
+ * @num_different_channels: the number of different channels we want
+ *     to use for verification
+ * @radar_detect: a bitmap where each bit corresponds to a channel
+ *     width where radar detection is needed, as in the definition of
+ *     &struct ieee80211_iface_combination.@radar_detect_widths
+ * @iftype_num: array with the numbers of interfaces of each interface
+ *     type.  The index is the interface type as specified in &enum
+ *     nl80211_iftype.
+ * @iter: function to call for each matching combination
+ * @data: pointer to pass to iter function
+ *
+ * This function can be called by the driver to check what possible
+ * combinations it fits in at a given moment, e.g. for channel switching
+ * purposes.
+ */
+int cfg80211_iter_combinations(struct wiphy *wiphy,
+                              const int num_different_channels,
+                              const u8 radar_detect,
+                              const int iftype_num[NUM_NL80211_IFTYPES],
+                              void (*iter)(const struct ieee80211_iface_combination *c,
+                                           void *data),
+                              void *data);
+
 /* Logging, debugging and troubleshooting/diagnostic helpers. */
 
 /* wiphy_printk helpers, similar to dev_printk */
index a28f4e0f625193b0682932207a46578f094f52a9..87cb1903640d63ccfed890c957294af46a93cf24 100644 (file)
@@ -57,12 +57,14 @@ static __inline__ __wsum csum_and_copy_to_user
 }
 #endif
 
+#ifndef HAVE_ARCH_CSUM_ADD
 static inline __wsum csum_add(__wsum csum, __wsum addend)
 {
        u32 res = (__force u32)csum;
        res += (__force u32)addend;
        return (__force __wsum)(res + (res < (__force u32)addend));
 }
+#endif
 
 static inline __wsum csum_sub(__wsum csum, __wsum addend)
 {
index 7828ebf99ee132241b76e500681a43188143da99..6efce384451e56f16d8aab0847a6a7be4e94e0a0 100644 (file)
@@ -181,6 +181,11 @@ struct dsa_switch_driver {
 void register_switch_driver(struct dsa_switch_driver *type);
 void unregister_switch_driver(struct dsa_switch_driver *type);
 
+static inline void *ds_to_priv(struct dsa_switch *ds)
+{
+       return (void *)(ds + 1);
+}
+
 /*
  * The original DSA tag format and some other tag formats have no
  * ethertype, which means that we need to add a little hack to the
index c7ae0ac528dc1e5e1d3c2d5456c2d0e5221b6933..0aa7122e8f15390b4b6158429a245057fdd23e5d 100644 (file)
 #define IEEE802154_SCF_KEY_SHORT_INDEX         2
 #define IEEE802154_SCF_KEY_HW_INDEX            3
 
+#define IEEE802154_SCF_SECLEVEL_NONE           0
+#define IEEE802154_SCF_SECLEVEL_MIC32          1
+#define IEEE802154_SCF_SECLEVEL_MIC64          2
+#define IEEE802154_SCF_SECLEVEL_MIC128         3
+#define IEEE802154_SCF_SECLEVEL_ENC            4
+#define IEEE802154_SCF_SECLEVEL_ENC_MIC32      5
+#define IEEE802154_SCF_SECLEVEL_ENC_MIC64      6
+#define IEEE802154_SCF_SECLEVEL_ENC_MIC128     7
+
 /* MAC footer size */
 #define IEEE802154_MFR_SIZE    2 /* 2 octets */
 
index 5a719ca892f41c24eb45e849a64327cd6221d407..3b53c8e405e48143f667c20850db1666c402d8fa 100644 (file)
@@ -27,6 +27,7 @@
 #ifndef IEEE802154_NETDEVICE_H
 #define IEEE802154_NETDEVICE_H
 
+#include <net/ieee802154.h>
 #include <net/af_ieee802154.h>
 #include <linux/netdevice.h>
 #include <linux/skbuff.h>
@@ -114,6 +115,34 @@ int ieee802154_hdr_pull(struct sk_buff *skb, struct ieee802154_hdr *hdr);
 int ieee802154_hdr_peek_addrs(const struct sk_buff *skb,
                              struct ieee802154_hdr *hdr);
 
+/* parses the full 802.15.4 header a given skb and stores them into hdr,
+ * performing pan id decompression and length checks to be suitable for use in
+ * header_ops.parse
+ */
+int ieee802154_hdr_peek(const struct sk_buff *skb, struct ieee802154_hdr *hdr);
+
+int ieee802154_max_payload(const struct ieee802154_hdr *hdr);
+
+static inline int
+ieee802154_sechdr_authtag_len(const struct ieee802154_sechdr *sec)
+{
+       switch (sec->level) {
+       case IEEE802154_SCF_SECLEVEL_MIC32:
+       case IEEE802154_SCF_SECLEVEL_ENC_MIC32:
+               return 4;
+       case IEEE802154_SCF_SECLEVEL_MIC64:
+       case IEEE802154_SCF_SECLEVEL_ENC_MIC64:
+               return 8;
+       case IEEE802154_SCF_SECLEVEL_MIC128:
+       case IEEE802154_SCF_SECLEVEL_ENC_MIC128:
+               return 16;
+       case IEEE802154_SCF_SECLEVEL_NONE:
+       case IEEE802154_SCF_SECLEVEL_ENC:
+       default:
+               return 0;
+       }
+}
+
 static inline int ieee802154_hdr_length(struct sk_buff *skb)
 {
        struct ieee802154_hdr hdr;
@@ -193,8 +222,12 @@ static inline void ieee802154_addr_to_sa(struct ieee802154_addr_sa *sa,
  */
 struct ieee802154_mac_cb {
        u8 lqi;
-       u8 flags;
-       u8 seq;
+       u8 type;
+       bool ackreq;
+       bool secen;
+       bool secen_override;
+       u8 seclevel;
+       bool seclevel_override;
        struct ieee802154_addr source;
        struct ieee802154_addr dest;
 };
@@ -204,25 +237,96 @@ static inline struct ieee802154_mac_cb *mac_cb(struct sk_buff *skb)
        return (struct ieee802154_mac_cb *)skb->cb;
 }
 
-#define MAC_CB_FLAG_TYPEMASK           ((1 << 3) - 1)
-
-#define MAC_CB_FLAG_ACKREQ             (1 << 3)
-#define MAC_CB_FLAG_SECEN              (1 << 4)
-
-static inline bool mac_cb_is_ackreq(struct sk_buff *skb)
+static inline struct ieee802154_mac_cb *mac_cb_init(struct sk_buff *skb)
 {
-       return mac_cb(skb)->flags & MAC_CB_FLAG_ACKREQ;
-}
+       BUILD_BUG_ON(sizeof(struct ieee802154_mac_cb) > sizeof(skb->cb));
 
-static inline bool mac_cb_is_secen(struct sk_buff *skb)
-{
-       return mac_cb(skb)->flags & MAC_CB_FLAG_SECEN;
+       memset(skb->cb, 0, sizeof(struct ieee802154_mac_cb));
+       return mac_cb(skb);
 }
 
-static inline int mac_cb_type(struct sk_buff *skb)
-{
-       return mac_cb(skb)->flags & MAC_CB_FLAG_TYPEMASK;
-}
+#define IEEE802154_LLSEC_KEY_SIZE 16
+
+struct ieee802154_llsec_key_id {
+       u8 mode;
+       u8 id;
+       union {
+               struct ieee802154_addr device_addr;
+               __le32 short_source;
+               __le64 extended_source;
+       };
+};
+
+struct ieee802154_llsec_key {
+       u8 frame_types;
+       u32 cmd_frame_ids;
+       u8 key[IEEE802154_LLSEC_KEY_SIZE];
+};
+
+struct ieee802154_llsec_key_entry {
+       struct list_head list;
+
+       struct ieee802154_llsec_key_id id;
+       struct ieee802154_llsec_key *key;
+};
+
+struct ieee802154_llsec_device_key {
+       struct list_head list;
+
+       struct ieee802154_llsec_key_id key_id;
+       u32 frame_counter;
+};
+
+enum {
+       IEEE802154_LLSEC_DEVKEY_IGNORE,
+       IEEE802154_LLSEC_DEVKEY_RESTRICT,
+       IEEE802154_LLSEC_DEVKEY_RECORD,
+
+       __IEEE802154_LLSEC_DEVKEY_MAX,
+};
+
+struct ieee802154_llsec_device {
+       struct list_head list;
+
+       __le16 pan_id;
+       __le16 short_addr;
+       __le64 hwaddr;
+       u32 frame_counter;
+       bool seclevel_exempt;
+
+       u8 key_mode;
+       struct list_head keys;
+};
+
+struct ieee802154_llsec_seclevel {
+       struct list_head list;
+
+       u8 frame_type;
+       u8 cmd_frame_id;
+       bool device_override;
+       u32 sec_levels;
+};
+
+struct ieee802154_llsec_params {
+       bool enabled;
+
+       __be32 frame_counter;
+       u8 out_level;
+       struct ieee802154_llsec_key_id out_key;
+
+       __le64 default_key_source;
+
+       __le16 pan_id;
+       __le64 hwaddr;
+       __le64 coord_hwaddr;
+       __le16 coord_shortaddr;
+};
+
+struct ieee802154_llsec_table {
+       struct list_head keys;
+       struct list_head devices;
+       struct list_head security_levels;
+};
 
 #define IEEE802154_MAC_SCAN_ED         0
 #define IEEE802154_MAC_SCAN_ACTIVE     1
@@ -242,6 +346,53 @@ struct ieee802154_mac_params {
 };
 
 struct wpan_phy;
+
+enum {
+       IEEE802154_LLSEC_PARAM_ENABLED = 1 << 0,
+       IEEE802154_LLSEC_PARAM_FRAME_COUNTER = 1 << 1,
+       IEEE802154_LLSEC_PARAM_OUT_LEVEL = 1 << 2,
+       IEEE802154_LLSEC_PARAM_OUT_KEY = 1 << 3,
+       IEEE802154_LLSEC_PARAM_KEY_SOURCE = 1 << 4,
+       IEEE802154_LLSEC_PARAM_PAN_ID = 1 << 5,
+       IEEE802154_LLSEC_PARAM_HWADDR = 1 << 6,
+       IEEE802154_LLSEC_PARAM_COORD_HWADDR = 1 << 7,
+       IEEE802154_LLSEC_PARAM_COORD_SHORTADDR = 1 << 8,
+};
+
+struct ieee802154_llsec_ops {
+       int (*get_params)(struct net_device *dev,
+                         struct ieee802154_llsec_params *params);
+       int (*set_params)(struct net_device *dev,
+                         const struct ieee802154_llsec_params *params,
+                         int changed);
+
+       int (*add_key)(struct net_device *dev,
+                      const struct ieee802154_llsec_key_id *id,
+                      const struct ieee802154_llsec_key *key);
+       int (*del_key)(struct net_device *dev,
+                      const struct ieee802154_llsec_key_id *id);
+
+       int (*add_dev)(struct net_device *dev,
+                      const struct ieee802154_llsec_device *llsec_dev);
+       int (*del_dev)(struct net_device *dev, __le64 dev_addr);
+
+       int (*add_devkey)(struct net_device *dev,
+                         __le64 device_addr,
+                         const struct ieee802154_llsec_device_key *key);
+       int (*del_devkey)(struct net_device *dev,
+                         __le64 device_addr,
+                         const struct ieee802154_llsec_device_key *key);
+
+       int (*add_seclevel)(struct net_device *dev,
+                           const struct ieee802154_llsec_seclevel *sl);
+       int (*del_seclevel)(struct net_device *dev,
+                           const struct ieee802154_llsec_seclevel *sl);
+
+       void (*lock_table)(struct net_device *dev);
+       void (*get_table)(struct net_device *dev,
+                         struct ieee802154_llsec_table **t);
+       void (*unlock_table)(struct net_device *dev);
+};
 /*
  * This should be located at net_device->ml_priv
  *
@@ -272,6 +423,8 @@ struct ieee802154_mlme_ops {
        void (*get_mac_params)(struct net_device *dev,
                               struct ieee802154_mac_params *params);
 
+       struct ieee802154_llsec_ops *llsec;
+
        /* The fields below are required. */
 
        struct wpan_phy *(*get_phy)(const struct net_device *dev);
index 3bd22795c3e259e1f1f55176c808c6fdcc994600..84b20835b736c53b55a19eac0bbe187e65626fec 100644 (file)
@@ -150,7 +150,7 @@ static inline int INET_ECN_set_ce(struct sk_buff *skb)
 }
 
 /*
- * RFC 6080 4.2
+ * RFC 6040 4.2
  *  To decapsulate the inner header at the tunnel egress, a compliant
  *  tunnel egress MUST set the outgoing ECN field to the codepoint at the
  *  intersection of the appropriate arriving inner header (row) and outer
index 1bdb47715def0e21496ae89a59d3d9bd5f1f2c81..dd1950a7e2730e0024f1c82fac8ab20f9b533fe5 100644 (file)
@@ -292,12 +292,12 @@ static inline struct sock *inet_lookup_listener(struct net *net,
 #define INET_ADDR_COOKIE(__name, __saddr, __daddr) \
        const __addrpair __name = (__force __addrpair) ( \
                                   (((__force __u64)(__be32)(__saddr)) << 32) | \
-                                  ((__force __u64)(__be32)(__daddr)));
+                                  ((__force __u64)(__be32)(__daddr)))
 #else /* __LITTLE_ENDIAN */
 #define INET_ADDR_COOKIE(__name, __saddr, __daddr) \
        const __addrpair __name = (__force __addrpair) ( \
                                   (((__force __u64)(__be32)(__daddr)) << 32) | \
-                                  ((__force __u64)(__be32)(__saddr)));
+                                  ((__force __u64)(__be32)(__saddr)))
 #endif /* __BIG_ENDIAN */
 #define INET_MATCH(__sk, __net, __cookie, __saddr, __daddr, __ports, __dif)    \
        (((__sk)->sk_portpair == (__ports))                     &&      \
@@ -306,7 +306,9 @@ static inline struct sock *inet_lookup_listener(struct net *net,
           ((__sk)->sk_bound_dev_if == (__dif)))                &&      \
         net_eq(sock_net(__sk), (__net)))
 #else /* 32-bit arch */
-#define INET_ADDR_COOKIE(__name, __saddr, __daddr)
+#define INET_ADDR_COOKIE(__name, __saddr, __daddr) \
+       const int __name __deprecated __attribute__((unused))
+
 #define INET_MATCH(__sk, __net, __cookie, __saddr, __daddr, __ports, __dif) \
        (((__sk)->sk_portpair == (__ports))             &&              \
         ((__sk)->sk_daddr      == (__saddr))           &&              \
index 1833c3f389ee64a0c6b3862d4f2fbc6db0984b0a..b1edf17bec01130f9751747c4d092e5de50aaeac 100644 (file)
@@ -90,6 +90,7 @@ struct inet_request_sock {
        kmemcheck_bitfield_end(flags);
        struct ip_options_rcu   *opt;
        struct sk_buff          *pktopts;
+       u32                     ir_mark;
 };
 
 static inline struct inet_request_sock *inet_rsk(const struct request_sock *sk)
@@ -97,6 +98,15 @@ static inline struct inet_request_sock *inet_rsk(const struct request_sock *sk)
        return (struct inet_request_sock *)sk;
 }
 
+static inline u32 inet_request_mark(struct sock *sk, struct sk_buff *skb)
+{
+       if (!sk->sk_mark && sock_net(sk)->ipv4.sysctl_tcp_fwmark_accept) {
+               return skb->mark;
+       } else {
+               return sk->sk_mark;
+       }
+}
+
 struct inet_cork {
        unsigned int            flags;
        __be32                  addr;
index 3ec2b0fb9d8395384373917691f49c433262a8db..2e4947895d753a606c0cd0dfc641c87c96241dff 100644 (file)
@@ -196,35 +196,31 @@ void ip_send_unicast_reply(struct net *net, struct sk_buff *skb, __be32 daddr,
 #define NET_ADD_STATS_BH(net, field, adnd) SNMP_ADD_STATS_BH((net)->mib.net_statistics, field, adnd)
 #define NET_ADD_STATS_USER(net, field, adnd) SNMP_ADD_STATS_USER((net)->mib.net_statistics, field, adnd)
 
-unsigned long snmp_fold_field(void __percpu *mib[], int offt);
+unsigned long snmp_fold_field(void __percpu *mib, int offt);
 #if BITS_PER_LONG==32
-u64 snmp_fold_field64(void __percpu *mib[], int offt, size_t sync_off);
+u64 snmp_fold_field64(void __percpu *mib, int offt, size_t sync_off);
 #else
-static inline u64 snmp_fold_field64(void __percpu *mib[], int offt, size_t syncp_off)
+static inline u64 snmp_fold_field64(void __percpu *mib, int offt, size_t syncp_off)
 {
        return snmp_fold_field(mib, offt);
 }
 #endif
-int snmp_mib_init(void __percpu *ptr[2], size_t mibsize, size_t align);
-
-static inline void snmp_mib_free(void __percpu *ptr[SNMP_ARRAY_SZ])
-{
-       int i;
-
-       BUG_ON(ptr == NULL);
-       for (i = 0; i < SNMP_ARRAY_SZ; i++) {
-               free_percpu(ptr[i]);
-               ptr[i] = NULL;
-       }
-}
 
 void inet_get_local_port_range(struct net *net, int *low, int *high);
 
-extern unsigned long *sysctl_local_reserved_ports;
-static inline int inet_is_reserved_local_port(int port)
+#ifdef CONFIG_SYSCTL
+static inline int inet_is_local_reserved_port(struct net *net, int port)
 {
-       return test_bit(port, sysctl_local_reserved_ports);
+       if (!net->ipv4.sysctl_local_reserved_ports)
+               return 0;
+       return test_bit(port, net->ipv4.sysctl_local_reserved_ports);
 }
+#else
+static inline int inet_is_local_reserved_port(struct net *net, int port)
+{
+       return 0;
+}
+#endif
 
 extern int sysctl_ip_nonlocal_bind;
 
@@ -243,6 +239,9 @@ void ipfrag_init(void);
 
 void ip_static_sysctl_init(void);
 
+#define IP4_REPLY_MARK(net, mark) \
+       ((net)->ipv4.sysctl_fwmark_reflect ? (mark) : 0)
+
 static inline bool ip_is_fragment(const struct iphdr *iph)
 {
        return (iph->frag_off & htons(IP_MF | IP_OFFSET)) != 0;
@@ -281,7 +280,7 @@ static inline bool ip_sk_use_pmtu(const struct sock *sk)
        return inet_sk(sk)->pmtudisc < IP_PMTUDISC_PROBE;
 }
 
-static inline bool ip_sk_local_df(const struct sock *sk)
+static inline bool ip_sk_ignore_df(const struct sock *sk)
 {
        return inet_sk(sk)->pmtudisc < IP_PMTUDISC_DO ||
               inet_sk(sk)->pmtudisc == IP_PMTUDISC_OMIT;
@@ -316,7 +315,7 @@ static inline void ip_select_ident(struct sk_buff *skb, struct dst_entry *dst, s
 {
        struct iphdr *iph = ip_hdr(skb);
 
-       if ((iph->frag_off & htons(IP_DF)) && !skb->local_df) {
+       if ((iph->frag_off & htons(IP_DF)) && !skb->ignore_df) {
                /* This is only to work around buggy Windows95/2000
                 * VJ compression implementations.  If the ID field
                 * does not change, they drop every other packet in
@@ -332,7 +331,7 @@ static inline void ip_select_ident_more(struct sk_buff *skb, struct dst_entry *d
 {
        struct iphdr *iph = ip_hdr(skb);
 
-       if ((iph->frag_off & htons(IP_DF)) && !skb->local_df) {
+       if ((iph->frag_off & htons(IP_DF)) && !skb->ignore_df) {
                if (sk && inet_sk(sk)->inet_daddr) {
                        iph->id = htons(inet_sk(sk)->inet_id);
                        inet_sk(sk)->inet_id += 1 + more;
@@ -342,6 +341,12 @@ static inline void ip_select_ident_more(struct sk_buff *skb, struct dst_entry *d
                __ip_select_ident(iph, dst, more);
 }
 
+static inline __wsum inet_compute_pseudo(struct sk_buff *skb, int proto)
+{
+       return csum_tcpudp_nofold(ip_hdr(skb)->saddr, ip_hdr(skb)->daddr,
+                                 skb->len, proto, 0);
+}
+
 /*
  *     Map a multicast IP onto multicast MAC for type ethernet.
  */
index 9e3c540c1b110c71b65003a6aac22cc6c333be5a..8ac5c21f84563faeda028606034f2c69d23dc2a6 100644 (file)
@@ -41,6 +41,13 @@ __sum16 csum_ipv6_magic(const struct in6_addr *saddr,
                        __wsum csum);
 #endif
 
+static inline __wsum ip6_compute_pseudo(struct sk_buff *skb, int proto)
+{
+       return ~csum_unfold(csum_ipv6_magic(&ipv6_hdr(skb)->saddr,
+                                           &ipv6_hdr(skb)->daddr,
+                                           skb->len, proto, 0));
+}
+
 static __inline__ __sum16 tcp_v6_check(int len,
                                   const struct in6_addr *saddr,
                                   const struct in6_addr *daddr,
index 6c4f5eac98e7be133af4b868507f0d217ac05c1b..38e41e4d099850c63b16dfbf8fbff160f7face1a 100644 (file)
@@ -185,7 +185,7 @@ static inline bool ip6_sk_accept_pmtu(const struct sock *sk)
               inet6_sk(sk)->pmtudisc != IPV6_PMTUDISC_OMIT;
 }
 
-static inline bool ip6_sk_local_df(const struct sock *sk)
+static inline bool ip6_sk_ignore_df(const struct sock *sk)
 {
        return inet6_sk(sk)->pmtudisc < IPV6_PMTUDISC_DO ||
               inet6_sk(sk)->pmtudisc == IPV6_PMTUDISC_OMIT;
index d640925bc4543bdfb30bf15d164bececbda7e798..ba810d0546bc636319cf896b460d4c50cfcc7092 100644 (file)
@@ -113,6 +113,9 @@ struct frag_hdr {
 #define        IP6_MF          0x0001
 #define        IP6_OFFSET      0xFFF8
 
+#define IP6_REPLY_MARK(net, mark) \
+       ((net)->ipv6.sysctl.fwmark_reflect ? (mark) : 0)
+
 #include <net/sock.h>
 
 /* sysctls */
@@ -583,6 +586,11 @@ static inline bool ipv6_addr_orchid(const struct in6_addr *a)
        return (a->s6_addr32[0] & htonl(0xfffffff0)) == htonl(0x20010010);
 }
 
+static inline bool ipv6_addr_is_multicast(const struct in6_addr *addr)
+{
+       return (addr->s6_addr32[0] & htonl(0xFF000000)) == htonl(0xFF000000);
+}
+
 static inline void ipv6_addr_set_v4mapped(const __be32 addr,
                                          struct in6_addr *v4mapped)
 {
@@ -664,6 +672,20 @@ void ipv6_select_ident(struct frag_hdr *fhdr, struct rt6_info *rt);
 
 int ip6_dst_hoplimit(struct dst_entry *dst);
 
+static inline int ip6_sk_dst_hoplimit(struct ipv6_pinfo *np, struct flowi6 *fl6,
+                                     struct dst_entry *dst)
+{
+       int hlimit;
+
+       if (ipv6_addr_is_multicast(&fl6->daddr))
+               hlimit = np->mcast_hops;
+       else
+               hlimit = np->hop_limit;
+       if (hlimit < 0)
+               hlimit = ip6_dst_hoplimit(dst);
+       return hlimit;
+}
+
 /*
  *     Header manipulation
  */
index 8248e3909fdf7d8531890e15bc3f18c5b90ac95f..451c1bf00df93317a358d4576ebee0c423828889 100644 (file)
@@ -1202,14 +1202,18 @@ struct ieee80211_vif *wdev_to_ieee80211_vif(struct wireless_dev *wdev);
  *     fall back to software crypto. Note that this flag deals only with
  *     RX, if your crypto engine can't deal with TX you can also set the
  *     %IEEE80211_KEY_FLAG_SW_MGMT_TX flag to encrypt such frames in SW.
+ * @IEEE80211_KEY_FLAG_GENERATE_IV_MGMT: This flag should be set by the
+ *     driver for a CCMP key to indicate that it requires IV generation
+ *     only for management frames (MFP).
  */
 enum ieee80211_key_flags {
-       IEEE80211_KEY_FLAG_GENERATE_IV  = 1<<1,
-       IEEE80211_KEY_FLAG_GENERATE_MMIC= 1<<2,
-       IEEE80211_KEY_FLAG_PAIRWISE     = 1<<3,
-       IEEE80211_KEY_FLAG_SW_MGMT_TX   = 1<<4,
-       IEEE80211_KEY_FLAG_PUT_IV_SPACE = 1<<5,
-       IEEE80211_KEY_FLAG_RX_MGMT      = 1<<6,
+       IEEE80211_KEY_FLAG_GENERATE_IV_MGMT     = BIT(0),
+       IEEE80211_KEY_FLAG_GENERATE_IV          = BIT(1),
+       IEEE80211_KEY_FLAG_GENERATE_MMIC        = BIT(2),
+       IEEE80211_KEY_FLAG_PAIRWISE             = BIT(3),
+       IEEE80211_KEY_FLAG_SW_MGMT_TX           = BIT(4),
+       IEEE80211_KEY_FLAG_PUT_IV_SPACE         = BIT(5),
+       IEEE80211_KEY_FLAG_RX_MGMT              = BIT(6),
 };
 
 /**
@@ -1555,6 +1559,12 @@ struct ieee80211_tx_control {
  *     for a single active channel while using channel contexts. When support
  *     is not enabled the default action is to disconnect when getting the
  *     CSA frame.
+ *
+ * @IEEE80211_HW_CHANGE_RUNNING_CHANCTX: The hardware can change a
+ *     channel context on-the-fly.  This is needed for channel switch
+ *     on single-channel hardware.  It can also be used as an
+ *     optimization in certain channel switch cases with
+ *     multi-channel.
  */
 enum ieee80211_hw_flags {
        IEEE80211_HW_HAS_RATE_CONTROL                   = 1<<0,
@@ -1586,6 +1596,7 @@ enum ieee80211_hw_flags {
        IEEE80211_HW_TIMING_BEACON_ONLY                 = 1<<26,
        IEEE80211_HW_SUPPORTS_HT_CCK_RATES              = 1<<27,
        IEEE80211_HW_CHANCTX_STA_CSA                    = 1<<28,
+       IEEE80211_HW_CHANGE_RUNNING_CHANCTX             = 1<<29,
 };
 
 /**
@@ -2609,6 +2620,7 @@ enum ieee80211_roc_type {
  *     of queues to flush, which is useful if different virtual interfaces
  *     use different hardware queues; it may also indicate all queues.
  *     If the parameter @drop is set to %true, pending frames may be dropped.
+ *     Note that vif can be NULL.
  *     The callback can sleep.
  *
  * @channel_switch: Drivers that need (or want) to offload the channel
@@ -2871,7 +2883,8 @@ struct ieee80211_ops {
                             struct netlink_callback *cb,
                             void *data, int len);
 #endif
-       void (*flush)(struct ieee80211_hw *hw, u32 queues, bool drop);
+       void (*flush)(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
+                     u32 queues, bool drop);
        void (*channel_switch)(struct ieee80211_hw *hw,
                               struct ieee80211_channel_switch *ch_switch);
        int (*set_antenna)(struct ieee80211_hw *hw, u32 tx_ant, u32 rx_ant);
@@ -4576,7 +4589,9 @@ conf_is_ht40(struct ieee80211_conf *conf)
 static inline bool
 conf_is_ht(struct ieee80211_conf *conf)
 {
-       return conf->chandef.width != NL80211_CHAN_WIDTH_20_NOHT;
+       return (conf->chandef.width != NL80211_CHAN_WIDTH_5) &&
+               (conf->chandef.width != NL80211_CHAN_WIDTH_10) &&
+               (conf->chandef.width != NL80211_CHAN_WIDTH_20_NOHT);
 }
 
 static inline enum nl80211_iftype
index bc4118ede5b56d9a115caca01c211a3b94b7c8e2..361d26077196678af5bd7ce217715a2cc847f2af 100644 (file)
@@ -379,15 +379,8 @@ net_ieee802154_lowpan(struct net *net)
 {
        return &net->ieee802154_lowpan;
 }
-#else
-static inline struct netns_ieee802154_lowpan *
-net_ieee802154_lowpan(struct net *net)
-{
-       return NULL;
-}
 #endif
 
-
 /* For callers who don't really care about whether it's IPv4 or IPv6 */
 static inline void rt_genid_bump_all(struct net *net)
 {
index e6bc14d8fa9a9a4b324fac9df5a47e64277f2aa8..7ee6ce6564aecc6b98eb6247e6d6c6263a3e130c 100644 (file)
@@ -72,21 +72,23 @@ static inline void nft_data_debug(const struct nft_data *data)
  *     struct nft_ctx - nf_tables rule/set context
  *
  *     @net: net namespace
- *     @skb: netlink skb
- *     @nlh: netlink message header
  *     @afi: address family info
  *     @table: the table the chain is contained in
  *     @chain: the chain the rule is contained in
  *     @nla: netlink attributes
+ *     @portid: netlink portID of the original message
+ *     @seq: netlink sequence number
+ *     @report: notify via unicast netlink message
  */
 struct nft_ctx {
        struct net                      *net;
-       const struct sk_buff            *skb;
-       const struct nlmsghdr           *nlh;
-       const struct nft_af_info        *afi;
-       const struct nft_table          *table;
-       const struct nft_chain          *chain;
+       struct nft_af_info              *afi;
+       struct nft_table                *table;
+       struct nft_chain                *chain;
        const struct nlattr * const     *nla;
+       u32                             portid;
+       u32                             seq;
+       bool                            report;
 };
 
 struct nft_data_desc {
@@ -145,6 +147,44 @@ struct nft_set_iter {
                              const struct nft_set_elem *elem);
 };
 
+/**
+ *     struct nft_set_desc - description of set elements
+ *
+ *     @klen: key length
+ *     @dlen: data length
+ *     @size: number of set elements
+ */
+struct nft_set_desc {
+       unsigned int            klen;
+       unsigned int            dlen;
+       unsigned int            size;
+};
+
+/**
+ *     enum nft_set_class - performance class
+ *
+ *     @NFT_LOOKUP_O_1: constant, O(1)
+ *     @NFT_LOOKUP_O_LOG_N: logarithmic, O(log N)
+ *     @NFT_LOOKUP_O_N: linear, O(N)
+ */
+enum nft_set_class {
+       NFT_SET_CLASS_O_1,
+       NFT_SET_CLASS_O_LOG_N,
+       NFT_SET_CLASS_O_N,
+};
+
+/**
+ *     struct nft_set_estimate - estimation of memory and performance
+ *                               characteristics
+ *
+ *     @size: required memory
+ *     @class: lookup performance class
+ */
+struct nft_set_estimate {
+       unsigned int            size;
+       enum nft_set_class      class;
+};
+
 /**
  *     struct nft_set_ops - nf_tables set operations
  *
@@ -174,7 +214,11 @@ struct nft_set_ops {
                                                struct nft_set_iter *iter);
 
        unsigned int                    (*privsize)(const struct nlattr * const nla[]);
+       bool                            (*estimate)(const struct nft_set_desc *desc,
+                                                   u32 features,
+                                                   struct nft_set_estimate *est);
        int                             (*init)(const struct nft_set *set,
+                                               const struct nft_set_desc *desc,
                                                const struct nlattr * const nla[]);
        void                            (*destroy)(const struct nft_set *set);
 
@@ -194,6 +238,8 @@ void nft_unregister_set(struct nft_set_ops *ops);
  *     @name: name of the set
  *     @ktype: key type (numeric type defined by userspace, not used in the kernel)
  *     @dtype: data type (verdict or numeric type defined by userspace)
+ *     @size: maximum set size
+ *     @nelems: number of elements
  *     @ops: set ops
  *     @flags: set flags
  *     @klen: key length
@@ -206,6 +252,8 @@ struct nft_set {
        char                            name[IFNAMSIZ];
        u32                             ktype;
        u32                             dtype;
+       u32                             size;
+       u32                             nelems;
        /* runtime data below here */
        const struct nft_set_ops        *ops ____cacheline_aligned;
        u16                             flags;
@@ -222,6 +270,8 @@ static inline void *nft_set_priv(const struct nft_set *set)
 
 struct nft_set *nf_tables_set_lookup(const struct nft_table *table,
                                     const struct nlattr *nla);
+struct nft_set *nf_tables_set_lookup_byid(const struct net *net,
+                                         const struct nlattr *nla);
 
 /**
  *     struct nft_set_binding - nf_tables set binding
@@ -341,18 +391,75 @@ struct nft_rule {
 };
 
 /**
- *     struct nft_rule_trans - nf_tables rule update in transaction
+ *     struct nft_trans - nf_tables object update in transaction
  *
+ *     @rcu_head: rcu head to defer release of transaction data
  *     @list: used internally
- *     @ctx: rule context
- *     @rule: rule that needs to be updated
+ *     @msg_type: message type
+ *     @ctx: transaction context
+ *     @data: internal information related to the transaction
  */
-struct nft_rule_trans {
+struct nft_trans {
+       struct rcu_head                 rcu_head;
        struct list_head                list;
+       int                             msg_type;
        struct nft_ctx                  ctx;
+       char                            data[0];
+};
+
+struct nft_trans_rule {
        struct nft_rule                 *rule;
 };
 
+#define nft_trans_rule(trans)  \
+       (((struct nft_trans_rule *)trans->data)->rule)
+
+struct nft_trans_set {
+       struct nft_set  *set;
+       u32             set_id;
+};
+
+#define nft_trans_set(trans)   \
+       (((struct nft_trans_set *)trans->data)->set)
+#define nft_trans_set_id(trans)        \
+       (((struct nft_trans_set *)trans->data)->set_id)
+
+struct nft_trans_chain {
+       bool            update;
+       char            name[NFT_CHAIN_MAXNAMELEN];
+       struct nft_stats __percpu *stats;
+       u8              policy;
+};
+
+#define nft_trans_chain_update(trans)  \
+       (((struct nft_trans_chain *)trans->data)->update)
+#define nft_trans_chain_name(trans)    \
+       (((struct nft_trans_chain *)trans->data)->name)
+#define nft_trans_chain_stats(trans)   \
+       (((struct nft_trans_chain *)trans->data)->stats)
+#define nft_trans_chain_policy(trans)  \
+       (((struct nft_trans_chain *)trans->data)->policy)
+
+struct nft_trans_table {
+       bool            update;
+       bool            enable;
+};
+
+#define nft_trans_table_update(trans)  \
+       (((struct nft_trans_table *)trans->data)->update)
+#define nft_trans_table_enable(trans)  \
+       (((struct nft_trans_table *)trans->data)->enable)
+
+struct nft_trans_elem {
+       struct nft_set          *set;
+       struct nft_set_elem     elem;
+};
+
+#define nft_trans_elem_set(trans)      \
+       (((struct nft_trans_elem *)trans->data)->set)
+#define nft_trans_elem(trans)  \
+       (((struct nft_trans_elem *)trans->data)->elem)
+
 static inline struct nft_expr *nft_expr_first(const struct nft_rule *rule)
 {
        return (struct nft_expr *)&rule->data[0];
@@ -385,6 +492,7 @@ static inline void *nft_userdata(const struct nft_rule *rule)
 
 enum nft_chain_flags {
        NFT_BASE_CHAIN                  = 0x1,
+       NFT_CHAIN_INACTIVE              = 0x2,
 };
 
 /**
diff --git a/include/net/netfilter/nft_meta.h b/include/net/netfilter/nft_meta.h
new file mode 100644 (file)
index 0000000..0ee47c3
--- /dev/null
@@ -0,0 +1,36 @@
+#ifndef _NFT_META_H_
+#define _NFT_META_H_
+
+struct nft_meta {
+       enum nft_meta_keys      key:8;
+       union {
+               enum nft_registers      dreg:8;
+               enum nft_registers      sreg:8;
+       };
+};
+
+extern const struct nla_policy nft_meta_policy[];
+
+int nft_meta_get_init(const struct nft_ctx *ctx,
+                     const struct nft_expr *expr,
+                     const struct nlattr * const tb[]);
+
+int nft_meta_set_init(const struct nft_ctx *ctx,
+                     const struct nft_expr *expr,
+                     const struct nlattr * const tb[]);
+
+int nft_meta_get_dump(struct sk_buff *skb,
+                     const struct nft_expr *expr);
+
+int nft_meta_set_dump(struct sk_buff *skb,
+                     const struct nft_expr *expr);
+
+void nft_meta_get_eval(const struct nft_expr *expr,
+                      struct nft_data data[NFT_REG_MAX + 1],
+                      const struct nft_pktinfo *pkt);
+
+void nft_meta_set_eval(const struct nft_expr *expr,
+                      struct nft_data data[NFT_REG_MAX + 1],
+                      const struct nft_pktinfo *pkt);
+
+#endif
index 80f500a29498e1fc9b8892e5c66be6bd02362eaa..aec5e12f9f19f1a6c506e47f60cc3056d7ce2a3d 100644 (file)
@@ -20,6 +20,11 @@ struct local_ports {
        int             range[2];
 };
 
+struct ping_group_range {
+       seqlock_t       lock;
+       kgid_t          range[2];
+};
+
 struct netns_ipv4 {
 #ifdef CONFIG_SYSCTL
        struct ctl_table_header *forw_hdr;
@@ -66,16 +71,23 @@ struct netns_ipv4 {
        int sysctl_icmp_ratemask;
        int sysctl_icmp_errors_use_inbound_ifaddr;
 
-       struct local_ports sysctl_local_ports;
+       struct local_ports ip_local_ports;
 
        int sysctl_tcp_ecn;
        int sysctl_ip_no_pmtu_disc;
        int sysctl_ip_fwd_use_pmtu;
 
-       kgid_t sysctl_ping_group_range[2];
+       int sysctl_fwmark_reflect;
+       int sysctl_tcp_fwmark_accept;
+
+       struct ping_group_range ping_group_range;
 
        atomic_t dev_addr_genid;
 
+#ifdef CONFIG_SYSCTL
+       unsigned long *sysctl_local_reserved_ports;
+#endif
+
 #ifdef CONFIG_IP_MROUTE
 #ifndef CONFIG_IP_MROUTE_MULTIPLE_TABLES
        struct mr_table         *mrt;
index 21edaf1f79161535af7ae1ae3ae7535ff1a236e3..19d3446e59d2555639e9553b1958d7354792af1e 100644 (file)
@@ -30,6 +30,7 @@ struct netns_sysctl_ipv6 {
        int flowlabel_consistency;
        int icmpv6_time;
        int anycast_src_echo_reply;
+       int fwmark_reflect;
 };
 
 struct netns_ipv6 {
index a2441fb1428f3f2e181df63319ca2b3fdc15dc4e..6da46dcf1049789f492cefd9472d0df84d4db91d 100644 (file)
@@ -136,7 +136,7 @@ tcf_exts_exec(struct sk_buff *skb, struct tcf_exts *exts,
 
 int tcf_exts_validate(struct net *net, struct tcf_proto *tp,
                      struct nlattr **tb, struct nlattr *rate_tlv,
-                     struct tcf_exts *exts);
+                     struct tcf_exts *exts, bool ovr);
 void tcf_exts_destroy(struct tcf_proto *tp, struct tcf_exts *exts);
 void tcf_exts_change(struct tcf_proto *tp, struct tcf_exts *dst,
                     struct tcf_exts *src);
index 75fc1f5a948d685fcfff12e04cc6b85e194cd541..259992444e80ae0b88eaec4ff345d23fd8f81c75 100644 (file)
@@ -131,6 +131,11 @@ struct regulatory_request {
  *     all country IE information processed by the regulatory core. This will
  *     override %REGULATORY_COUNTRY_IE_FOLLOW_POWER as all country IEs will
  *     be ignored.
+ * @REGULATORY_ENABLE_RELAX_NO_IR: for devices that wish to allow the
+ *      NO_IR relaxation, which enables transmissions on channels on which
+ *      otherwise initiating radiation is not allowed. This will enable the
+ *      relaxations enabled under the CFG80211_REG_RELAX_NO_IR configuration
+ *      option
  */
 enum ieee80211_regulatory_flags {
        REGULATORY_CUSTOM_REG                   = BIT(0),
@@ -138,6 +143,7 @@ enum ieee80211_regulatory_flags {
        REGULATORY_DISABLE_BEACON_HINTS         = BIT(2),
        REGULATORY_COUNTRY_IE_FOLLOW_POWER      = BIT(3),
        REGULATORY_COUNTRY_IE_IGNORE            = BIT(4),
+       REGULATORY_ENABLE_RELAX_NO_IR           = BIT(5),
 };
 
 struct ieee80211_freq_range {
index d062f81c692f1ee3e61ba1a06bd27e3a9edb761a..624f9857c83e3d7f2987ef95ecc410ad6f8c744f 100644 (file)
@@ -199,7 +199,7 @@ struct tcf_proto_ops {
        int                     (*change)(struct net *net, struct sk_buff *,
                                        struct tcf_proto*, unsigned long,
                                        u32 handle, struct nlattr **,
-                                       unsigned long *);
+                                       unsigned long *, bool);
        int                     (*delete)(struct tcf_proto*, unsigned long);
        void                    (*walk)(struct tcf_proto*, struct tcf_walker *arg);
 
index 71596261fa997ec7014b77f0bbee9b47b6146493..f1f27fdbb0d5738d6f3f3fbb93a79d240a1129b5 100644 (file)
@@ -116,51 +116,49 @@ struct linux_xfrm_mib {
        unsigned long   mibs[LINUX_MIB_XFRMMAX];
 };
 
-#define SNMP_ARRAY_SZ 1
-
 #define DEFINE_SNMP_STAT(type, name)   \
-       __typeof__(type) __percpu *name[SNMP_ARRAY_SZ]
+       __typeof__(type) __percpu *name
 #define DEFINE_SNMP_STAT_ATOMIC(type, name)    \
        __typeof__(type) *name
 #define DECLARE_SNMP_STAT(type, name)  \
-       extern __typeof__(type) __percpu *name[SNMP_ARRAY_SZ]
+       extern __typeof__(type) __percpu *name
 
 #define SNMP_INC_STATS_BH(mib, field)  \
-                       __this_cpu_inc(mib[0]->mibs[field])
+                       __this_cpu_inc(mib->mibs[field])
 
 #define SNMP_INC_STATS_USER(mib, field)        \
-                       this_cpu_inc(mib[0]->mibs[field])
+                       this_cpu_inc(mib->mibs[field])
 
 #define SNMP_INC_STATS_ATOMIC_LONG(mib, field) \
                        atomic_long_inc(&mib->mibs[field])
 
 #define SNMP_INC_STATS(mib, field)     \
-                       this_cpu_inc(mib[0]->mibs[field])
+                       this_cpu_inc(mib->mibs[field])
 
 #define SNMP_DEC_STATS(mib, field)     \
-                       this_cpu_dec(mib[0]->mibs[field])
+                       this_cpu_dec(mib->mibs[field])
 
 #define SNMP_ADD_STATS_BH(mib, field, addend)  \
-                       __this_cpu_add(mib[0]->mibs[field], addend)
+                       __this_cpu_add(mib->mibs[field], addend)
 
 #define SNMP_ADD_STATS_USER(mib, field, addend)        \
-                       this_cpu_add(mib[0]->mibs[field], addend)
+                       this_cpu_add(mib->mibs[field], addend)
 
 #define SNMP_ADD_STATS(mib, field, addend)     \
-                       this_cpu_add(mib[0]->mibs[field], addend)
+                       this_cpu_add(mib->mibs[field], addend)
 /*
- * Use "__typeof__(*mib[0]) *ptr" instead of "__typeof__(mib[0]) ptr"
+ * Use "__typeof__(*mib) *ptr" instead of "__typeof__(mib) ptr"
  * to make @ptr a non-percpu pointer.
  */
 #define SNMP_UPD_PO_STATS(mib, basefield, addend)      \
        do { \
-               __typeof__(*mib[0]->mibs) *ptr = mib[0]->mibs;  \
+               __typeof__(*mib->mibs) *ptr = mib->mibs;        \
                this_cpu_inc(ptr[basefield##PKTS]);             \
                this_cpu_add(ptr[basefield##OCTETS], addend);   \
        } while (0)
 #define SNMP_UPD_PO_STATS_BH(mib, basefield, addend)   \
        do { \
-               __typeof__(*mib[0]->mibs) *ptr = mib[0]->mibs;  \
+               __typeof__(*mib->mibs) *ptr = mib->mibs;        \
                __this_cpu_inc(ptr[basefield##PKTS]);           \
                __this_cpu_add(ptr[basefield##OCTETS], addend); \
        } while (0)
@@ -170,7 +168,7 @@ struct linux_xfrm_mib {
 
 #define SNMP_ADD_STATS64_BH(mib, field, addend)                        \
        do {                                                            \
-               __typeof__(*mib[0]) *ptr = __this_cpu_ptr((mib)[0]);    \
+               __typeof__(*mib) *ptr = __this_cpu_ptr(mib);            \
                u64_stats_update_begin(&ptr->syncp);                    \
                ptr->mibs[field] += addend;                             \
                u64_stats_update_end(&ptr->syncp);                      \
@@ -191,8 +189,8 @@ struct linux_xfrm_mib {
 #define SNMP_INC_STATS64(mib, field) SNMP_ADD_STATS64(mib, field, 1)
 #define SNMP_UPD_PO_STATS64_BH(mib, basefield, addend)                 \
        do {                                                            \
-               __typeof__(*mib[0]) *ptr;                               \
-               ptr = __this_cpu_ptr((mib)[0]);                         \
+               __typeof__(*mib) *ptr;                                  \
+               ptr = __this_cpu_ptr(mib);                              \
                u64_stats_update_begin(&ptr->syncp);                    \
                ptr->mibs[basefield##PKTS]++;                           \
                ptr->mibs[basefield##OCTETS] += addend;                 \
index 8338a14e48053d853a57af674f7edca1e085fdc5..21569cf456ed54459a537e5a6cf02349a2a8413c 100644 (file)
@@ -2255,6 +2255,11 @@ int sock_get_timestampns(struct sock *, struct timespec __user *);
 int sock_recv_errqueue(struct sock *sk, struct msghdr *msg, int len, int level,
                       int type);
 
+bool sk_ns_capable(const struct sock *sk,
+                  struct user_namespace *user_ns, int cap);
+bool sk_capable(const struct sock *sk, int cap);
+bool sk_net_capable(const struct sock *sk, int cap);
+
 /*
  *     Enable debug/info messages
  */
index 163d2b467d78982be34b278411e306a49444fc31..e80abe4486cbd252eb556997978fc804d389db7e 100644 (file)
@@ -220,8 +220,6 @@ void tcp_time_wait(struct sock *sk, int state, int timeo);
 #define        TFO_SERVER_ENABLE       2
 #define        TFO_CLIENT_NO_COOKIE    4       /* Data in SYN w/o cookie option */
 
-/* Process SYN data but skip cookie validation */
-#define        TFO_SERVER_COOKIE_NOT_CHKED     0x100
 /* Accept SYN data w/o any cookie option */
 #define        TFO_SERVER_COOKIE_NOT_REQD      0x200
 
@@ -230,10 +228,6 @@ void tcp_time_wait(struct sock *sk, int state, int timeo);
  */
 #define        TFO_SERVER_WO_SOCKOPT1  0x400
 #define        TFO_SERVER_WO_SOCKOPT2  0x800
-/* Always create TFO child sockets on a TFO listener even when
- * cookie/data not present. (For testing purpose!)
- */
-#define        TFO_SERVER_ALWAYS       0x1000
 
 extern struct inet_timewait_death_row tcp_death_row;
 
@@ -796,7 +790,7 @@ struct tcp_congestion_ops {
        /* return slow start threshold (required) */
        u32 (*ssthresh)(struct sock *sk);
        /* do new cwnd calculation (required) */
-       void (*cong_avoid)(struct sock *sk, u32 ack, u32 acked, u32 in_flight);
+       void (*cong_avoid)(struct sock *sk, u32 ack, u32 acked);
        /* call before changing ca_state (optional) */
        void (*set_state)(struct sock *sk, u8 new_state);
        /* call when cwnd event occurs (optional) */
@@ -828,7 +822,7 @@ void tcp_cong_avoid_ai(struct tcp_sock *tp, u32 w);
 
 extern struct tcp_congestion_ops tcp_init_congestion_ops;
 u32 tcp_reno_ssthresh(struct sock *sk);
-void tcp_reno_cong_avoid(struct sock *sk, u32 ack, u32 acked, u32 in_flight);
+void tcp_reno_cong_avoid(struct sock *sk, u32 ack, u32 acked);
 extern struct tcp_congestion_ops tcp_reno;
 
 static inline void tcp_set_ca_state(struct sock *sk, const u8 ca_state)
@@ -974,7 +968,30 @@ static inline u32 tcp_wnd_end(const struct tcp_sock *tp)
 {
        return tp->snd_una + tp->snd_wnd;
 }
-bool tcp_is_cwnd_limited(const struct sock *sk, u32 in_flight);
+
+/* We follow the spirit of RFC2861 to validate cwnd but implement a more
+ * flexible approach. The RFC suggests cwnd should not be raised unless
+ * it was fully used previously. And that's exactly what we do in
+ * congestion avoidance mode. But in slow start we allow cwnd to grow
+ * as long as the application has used half the cwnd.
+ * Example :
+ *    cwnd is 10 (IW10), but application sends 9 frames.
+ *    We allow cwnd to reach 18 when all frames are ACKed.
+ * This check is safe because it's as aggressive as slow start which already
+ * risks 100% overshoot. The advantage is that we discourage application to
+ * either send more filler packets or data to artificially blow up the cwnd
+ * usage, and allow application-limited process to probe bw more aggressively.
+ */
+static inline bool tcp_is_cwnd_limited(const struct sock *sk)
+{
+       const struct tcp_sock *tp = tcp_sk(sk);
+
+       /* If in slow start, ensure cwnd grows to twice what was ACKed. */
+       if (tp->snd_cwnd <= tp->snd_ssthresh)
+               return tp->snd_cwnd < 2 * tp->max_packets_out;
+
+       return tp->is_cwnd_limited;
+}
 
 static inline void tcp_check_probe_timer(struct sock *sk)
 {
@@ -1102,6 +1119,9 @@ static inline void tcp_openreq_init(struct request_sock *req,
        ireq->ir_num = ntohs(tcp_hdr(skb)->dest);
 }
 
+extern void tcp_openreq_init_rwin(struct request_sock *req,
+                                 struct sock *sk, struct dst_entry *dst);
+
 void tcp_enter_memory_pressure(struct sock *sk);
 
 static inline int keepalive_intvl_when(const struct tcp_sock *tp)
@@ -1311,8 +1331,10 @@ void tcp_free_fastopen_req(struct tcp_sock *tp);
 
 extern struct tcp_fastopen_context __rcu *tcp_fastopen_ctx;
 int tcp_fastopen_reset_cipher(void *key, unsigned int len);
-void tcp_fastopen_cookie_gen(__be32 src, __be32 dst,
-                            struct tcp_fastopen_cookie *foc);
+bool tcp_try_fastopen(struct sock *sk, struct sk_buff *skb,
+                     struct request_sock *req,
+                     struct tcp_fastopen_cookie *foc,
+                     struct dst_entry *dst);
 void tcp_fastopen_init_key_once(bool publish);
 #define TCP_FASTOPEN_KEY_LENGTH 16
 
diff --git a/include/net/tso.h b/include/net/tso.h
new file mode 100644 (file)
index 0000000..47e5444
--- /dev/null
@@ -0,0 +1,20 @@
+#ifndef _TSO_H
+#define _TSO_H
+
+#include <net/ip.h>
+
+struct tso_t {
+       int next_frag_idx;
+       void *data;
+       size_t size;
+       u16 ip_id;
+       u32 tcp_seq;
+};
+
+int tso_count_descs(struct sk_buff *skb);
+void tso_build_hdr(struct sk_buff *skb, char *hdr, struct tso_t *tso,
+                  int size, bool is_last);
+void tso_build_data(struct sk_buff *skb, struct tso_t *tso, int size);
+void tso_start(struct sk_buff *skb, struct tso_t *tso);
+
+#endif /* _TSO_H */
index 5deef1ae78c964608d629d29d5628dfef52fdf6e..7bb4084b1bd0c036250e002c59ef35613db3aad7 100644 (file)
@@ -33,7 +33,7 @@ void vxlan_sock_release(struct vxlan_sock *vs);
 int vxlan_xmit_skb(struct vxlan_sock *vs,
                   struct rtable *rt, struct sk_buff *skb,
                   __be32 src, __be32 dst, __u8 tos, __u8 ttl, __be16 df,
-                  __be16 src_port, __be16 dst_port, __be32 vni);
+                  __be16 src_port, __be16 dst_port, __be32 vni, bool xnet);
 
 __be16 vxlan_src_port(__u16 port_min, __u16 port_max, struct sk_buff *skb);
 
index 010ea89eeb0e407a85a052e6b8905dedb6ac5991..6a1a0245474feee8f32fe040e56f0044f53a20f4 100644 (file)
@@ -16,15 +16,6 @@ struct mpage_da_data;
 struct ext4_map_blocks;
 struct extent_status;
 
-/* shim until we merge in the xfs_collapse_range branch */
-#ifndef FALLOC_FL_COLLAPSE_RANGE
-#define FALLOC_FL_COLLAPSE_RANGE       0x08
-#endif
-
-#ifndef FALLOC_FL_ZERO_RANGE
-#define FALLOC_FL_ZERO_RANGE           0x10
-#endif
-
 #define EXT4_I(inode) (container_of(inode, struct ext4_inode_info, vfs_inode))
 
 #define show_mballoc_flags(flags) __print_flags(flags, "|",    \
index 11fd51b413de25a6a2415c1724dee458d3314ddc..ed0b2c599a64f7d701117bf54ba6a4dcd56edb31 100644 (file)
@@ -25,7 +25,7 @@ struct module;
        { (1UL << TAINT_OOT_MODULE),            "O" },          \
        { (1UL << TAINT_FORCED_MODULE),         "F" },          \
        { (1UL << TAINT_CRAP),                  "C" },          \
-       { (1UL << TAINT_UNSIGNED_MODULE),       "X" })
+       { (1UL << TAINT_UNSIGNED_MODULE),       "E" })
 
 TRACE_EVENT(module_load,
 
index a9b13f8b3595107579ca6ea20421d53a9dd15017..7543b3e51331fcb38574e3f309713b3a6a2d31c0 100644 (file)
 #endif
 
 /*
- * fd "private" POSIX locks.
+ * Open File Description Locks
  *
- * Usually POSIX locks held by a process are released on *any* close and are
+ * Usually record locks held by a process are released on *any* close and are
  * not inherited across a fork().
  *
- * These cmd values will set locks that conflict with normal POSIX locks, but
- * are "owned" by the opened file, not the process. This means that they are
- * inherited across fork() like BSD (flock) locks, and they are only released
- * automatically when the last reference to the the open file against which
- * they were acquired is put.
+ * These cmd values will set locks that conflict with process-associated
+ * record  locks, but are "owned" by the open file description, not the
+ * process. This means that they are inherited across fork() like BSD (flock)
+ * locks, and they are only released automatically when the last reference to
+ * the the open file against which they were acquired is put.
  */
-#define F_GETLKP       36
-#define F_SETLKP       37
-#define F_SETLKPW      38
+#define F_OFD_GETLK    36
+#define F_OFD_SETLK    37
+#define F_OFD_SETLKW   38
 
 #define F_OWNER_TID    0
 #define F_OWNER_PID    1
index 11917f747cb401be5b7dc8ab788fa5d0d4e8e47c..dfa4c860ccefd1af49a3fff7a0a0328db295c647 100644 (file)
@@ -373,6 +373,14 @@ enum {
  */
 #define AUDIT_MESSAGE_TEXT_MAX 8560
 
+/* Multicast Netlink socket groups (default up to 32) */
+enum audit_nlgrps {
+       AUDIT_NLGRP_NONE,       /* Group 0 not used */
+       AUDIT_NLGRP_READLOG,    /* "best effort" read only socket */
+       __AUDIT_NLGRP_MAX
+};
+#define AUDIT_NLGRP_MAX                (__AUDIT_NLGRP_MAX - 1)
+
 struct audit_status {
        __u32           mask;           /* Bit mask for valid entries */
        __u32           enabled;        /* 1 = enabled, 0 = disabled */
index 5d9d1d1407180a9291c0f986945e3a34f2ccf51e..41892f720057df2cc23f96c7d44d6ab2808fdfdd 100644 (file)
@@ -42,8 +42,8 @@
  * DAMAGE.
  */
 
-#ifndef CAN_H
-#define CAN_H
+#ifndef _UAPI_CAN_H
+#define _UAPI_CAN_H
 
 #include <linux/types.h>
 #include <linux/socket.h>
@@ -191,4 +191,4 @@ struct can_filter {
 
 #define CAN_INV_FILTER 0x20000000U /* to be set in can_filter.can_id */
 
-#endif /* CAN_H */
+#endif /* !_UAPI_CAN_H */
index 382251a1d21403acd817577d83c21f47d0389865..89ddb9dc9bdf7ca8bd191c9dedf7019f24573931 100644 (file)
@@ -41,8 +41,8 @@
  * DAMAGE.
  */
 
-#ifndef CAN_BCM_H
-#define CAN_BCM_H
+#ifndef _UAPI_CAN_BCM_H
+#define _UAPI_CAN_BCM_H
 
 #include <linux/types.h>
 #include <linux/can.h>
@@ -95,4 +95,4 @@ enum {
 #define TX_RESET_MULTI_IDX  0x0200
 #define RX_RTR_FRAME        0x0400
 
-#endif /* CAN_BCM_H */
+#endif /* !_UAPI_CAN_BCM_H */
index b632045453202074ada263866052bc2a806e85bc..c247446ab25a4e564a068ae97e154cf7972f0a1d 100644 (file)
@@ -41,8 +41,8 @@
  * DAMAGE.
  */
 
-#ifndef CAN_ERROR_H
-#define CAN_ERROR_H
+#ifndef _UAPI_CAN_ERROR_H
+#define _UAPI_CAN_ERROR_H
 
 #define CAN_ERR_DLC 8 /* dlc for error message frames */
 
 
 /* controller specific additional information / data[5..7] */
 
-#endif /* CAN_ERROR_H */
+#endif /* _UAPI_CAN_ERROR_H */
index 844c8964bdfee3a3f4a7308bf0fd832e82754a89..3e6184cf2f6dc5b2318f87f49db09ef119182683 100644 (file)
@@ -41,8 +41,8 @@
  * DAMAGE.
  */
 
-#ifndef CAN_GW_H
-#define CAN_GW_H
+#ifndef _UAPI_CAN_GW_H
+#define _UAPI_CAN_GW_H
 
 #include <linux/types.h>
 #include <linux/can.h>
@@ -200,4 +200,4 @@ enum {
  *         Beware of sending unpacked or aligned structs!
  */
 
-#endif
+#endif /* !_UAPI_CAN_GW_H */
index 7e2e1863db16e02fa15e1edc109adefda8236ba7..813d11f549774aadf5f3d87ba28be840e7f6e399 100644 (file)
@@ -15,8 +15,8 @@
  * GNU General Public License for more details.
  */
 
-#ifndef CAN_NETLINK_H
-#define CAN_NETLINK_H
+#ifndef _UAPI_CAN_NETLINK_H
+#define _UAPI_CAN_NETLINK_H
 
 #include <linux/types.h>
 
@@ -130,4 +130,4 @@ enum {
 
 #define IFLA_CAN_MAX   (__IFLA_CAN_MAX - 1)
 
-#endif /* CAN_NETLINK_H */
+#endif /* !_UAPI_CAN_NETLINK_H */
index c7d8c334e0ce26838c7cc611bd3ad1eb5a31a6c4..78ec76fd89a6ce4fe70161576ec0c01e5d6156d3 100644 (file)
@@ -42,8 +42,8 @@
  * DAMAGE.
  */
 
-#ifndef CAN_RAW_H
-#define CAN_RAW_H
+#ifndef _UAPI_CAN_RAW_H
+#define _UAPI_CAN_RAW_H
 
 #include <linux/can.h>
 
@@ -59,4 +59,4 @@ enum {
        CAN_RAW_FD_FRAMES,      /* allow CAN FD frames (default:off) */
 };
 
-#endif
+#endif /* !_UAPI_CAN_RAW_H */
index 154dd6d3c8fedaa54a04817567580b11727f2807..12c37a197d247ca980fef9c6e81ed0c067f27987 100644 (file)
@@ -347,7 +347,12 @@ struct vfs_cap_data {
 
 #define CAP_BLOCK_SUSPEND    36
 
-#define CAP_LAST_CAP         CAP_BLOCK_SUSPEND
+/* Allow reading the audit log via multicast netlink socket */
+
+#define CAP_AUDIT_READ         37
+
+
+#define CAP_LAST_CAP         CAP_AUDIT_READ
 
 #define cap_valid(x) ((x) >= 0 && (x) <= CAP_LAST_CAP)
 
index fd161e91b6d7e711270da030ed84b38f9b718f0c..d47d31d6fa0edcb3bd7c06a84cb8eb1a767afa3e 100644 (file)
@@ -846,6 +846,35 @@ struct ethtool_rxfh_indir {
        __u32   ring_index[0];
 };
 
+/**
+ * struct ethtool_rxfh - command to get/set RX flow hash indir or/and hash key.
+ * @cmd: Specific command number - %ETHTOOL_GRSSH or %ETHTOOL_SRSSH
+ * @rss_context: RSS context identifier.
+ * @indir_size: On entry, the array size of the user buffer, which may be zero.
+ *             On return from %ETHTOOL_GRSSH, the array size of the hardware
+ *             indirection table.
+ * @key_size:  On entry, the array size of the user buffer in bytes,
+ *             which may be zero.
+ *             On return from %ETHTOOL_GRSSH, the size of the RSS hash key.
+ * @rsvd:      Reserved for future extensions.
+ * @rss_config: RX ring/queue index for each hash value i.e., indirection table
+ *             of size @indir_size followed by hash key of size @key_size.
+ *
+ * For %ETHTOOL_GRSSH, a @indir_size and key_size of zero means that only the
+ * size should be returned.  For %ETHTOOL_SRSSH, a @indir_size of 0xDEADBEEF
+ * means that indir table setting is not requested and a @indir_size of zero
+ * means the indir table should be reset to default values.  This last feature
+ * is not supported by the original implementations.
+ */
+struct ethtool_rxfh {
+       __u32   cmd;
+       __u32   rss_context;
+       __u32   indir_size;
+       __u32   key_size;
+       __u32   rsvd[2];
+       __u32   rss_config[0];
+};
+
 /**
  * struct ethtool_rx_ntuple_flow_spec - specification for RX flow filter
  * @flow_type: Type of match to perform, e.g. %TCP_V4_FLOW
@@ -1118,6 +1147,9 @@ enum ethtool_sfeatures_retval_bits {
 #define ETHTOOL_GEEE           0x00000044 /* Get EEE settings */
 #define ETHTOOL_SEEE           0x00000045 /* Set EEE settings */
 
+#define ETHTOOL_GRSSH          0x00000046 /* Get RX flow hash configuration */
+#define ETHTOOL_SRSSH          0x00000047 /* Set RX flow hash configuration */
+
 /* compatibility with older code */
 #define SPARC_ETH_GSET         ETHTOOL_GSET
 #define SPARC_ETH_SSET         ETHTOOL_SSET
index 8eb9ccaa5b48124b716e5abe7741aef4ca711d9f..253b4d42cf2bb31517a8a159f5e1c37f13c075f2 100644 (file)
@@ -130,7 +130,8 @@ struct sock_fprog { /* Required for SO_ATTACH_FILTER. */
 #define SKF_AD_VLAN_TAG        44
 #define SKF_AD_VLAN_TAG_PRESENT 48
 #define SKF_AD_PAY_OFFSET      52
-#define SKF_AD_MAX     56
+#define SKF_AD_RANDOM  56
+#define SKF_AD_MAX     60
 #define SKF_NET_OFF   (-0x100000)
 #define SKF_LL_OFF    (-0x200000)
 
index 0d36909c3aefa27f26aaf60d1d356d462ae94d0e..1086cd9f675473b21f2c316eac16d591743af777 100644 (file)
  *  Define max and min legal sizes.  The frame sizes do not include
  *  4 byte FCS/CRC (frame check sequence).
  */
-#define FDDI_K_ALEN                    6               /* Octets in one FDDI address */
-#define FDDI_K_8022_HLEN       16              /* Total octets in 802.2 header */
-#define FDDI_K_SNAP_HLEN       21              /* Total octets in 802.2 SNAP header */
-#define FDDI_K_8022_ZLEN       16              /* Min octets in 802.2 frame sans FCS */
-#define FDDI_K_SNAP_ZLEN       21              /* Min octets in 802.2 SNAP frame sans FCS */
+#define FDDI_K_ALEN            6       /* Octets in one FDDI address */
+#define FDDI_K_8022_HLEN       16      /* Total octets in 802.2 header */
+#define FDDI_K_SNAP_HLEN       21      /* Total octets in 802.2 SNAP header */
+#define FDDI_K_8022_ZLEN       16      /* Min octets in 802.2 frame sans
+                                          FCS */
+#define FDDI_K_SNAP_ZLEN       21      /* Min octets in 802.2 SNAP frame sans
+                                          FCS */
 #define FDDI_K_8022_DLEN       4475    /* Max octets in 802.2 payload */
 #define FDDI_K_SNAP_DLEN       4470    /* Max octets in 802.2 SNAP payload */
-#define FDDI_K_LLC_ZLEN                13              /* Min octets in LLC frame sans FCS */
+#define FDDI_K_LLC_ZLEN                13      /* Min octets in LLC frame sans FCS */
 #define FDDI_K_LLC_LEN         4491    /* Max octets in LLC frame sans FCS */
+#define FDDI_K_OUI_LEN         3       /* Octets in OUI in 802.2 SNAP
+                                          header */
 
 /* Define FDDI Frame Control (FC) Byte values */
-#define FDDI_FC_K_VOID                                 0x00    
-#define FDDI_FC_K_NON_RESTRICTED_TOKEN 0x80    
-#define FDDI_FC_K_RESTRICTED_TOKEN             0xC0    
-#define FDDI_FC_K_SMT_MIN                              0x41
-#define FDDI_FC_K_SMT_MAX                              0x4F
-#define FDDI_FC_K_MAC_MIN                              0xC1
-#define FDDI_FC_K_MAC_MAX                              0xCF    
-#define FDDI_FC_K_ASYNC_LLC_MIN                        0x50
-#define FDDI_FC_K_ASYNC_LLC_DEF                        0x54
-#define FDDI_FC_K_ASYNC_LLC_MAX                        0x5F
-#define FDDI_FC_K_SYNC_LLC_MIN                 0xD0
-#define FDDI_FC_K_SYNC_LLC_MAX                 0xD7
-#define FDDI_FC_K_IMPLEMENTOR_MIN              0x60
-#define FDDI_FC_K_IMPLEMENTOR_MAX              0x6F
-#define FDDI_FC_K_RESERVED_MIN                 0x70
-#define FDDI_FC_K_RESERVED_MAX                 0x7F
+#define FDDI_FC_K_VOID                 0x00
+#define FDDI_FC_K_NON_RESTRICTED_TOKEN 0x80
+#define FDDI_FC_K_RESTRICTED_TOKEN     0xC0
+#define FDDI_FC_K_SMT_MIN              0x41
+#define FDDI_FC_K_SMT_MAX              0x4F
+#define FDDI_FC_K_MAC_MIN              0xC1
+#define FDDI_FC_K_MAC_MAX              0xCF
+#define FDDI_FC_K_ASYNC_LLC_MIN                0x50
+#define FDDI_FC_K_ASYNC_LLC_DEF                0x54
+#define FDDI_FC_K_ASYNC_LLC_MAX                0x5F
+#define FDDI_FC_K_SYNC_LLC_MIN         0xD0
+#define FDDI_FC_K_SYNC_LLC_MAX         0xD7
+#define FDDI_FC_K_IMPLEMENTOR_MIN      0x60
+#define FDDI_FC_K_IMPLEMENTOR_MAX      0x6F
+#define FDDI_FC_K_RESERVED_MIN         0x70
+#define FDDI_FC_K_RESERVED_MAX         0x7F
 
 /* Define LLC and SNAP constants */
-#define FDDI_EXTENDED_SAP      0xAA
+#define FDDI_EXTENDED_SAP              0xAA
 #define FDDI_UI_CMD                    0x03
 
 /* Define 802.2 Type 1 header */
 struct fddi_8022_1_hdr {
-       __u8    dsap;                                   /* destination service access point */
-       __u8    ssap;                                   /* source service access point */
-       __u8    ctrl;                                   /* control byte #1 */
+       __u8    dsap;                   /* destination service access point */
+       __u8    ssap;                   /* source service access point */
+       __u8    ctrl;                   /* control byte #1 */
 } __attribute__((packed));
 
 /* Define 802.2 Type 2 header */
 struct fddi_8022_2_hdr {
-       __u8    dsap;                                   /* destination service access point */
-       __u8    ssap;                                   /* source service access point */
-       __u8    ctrl_1;                                 /* control byte #1 */
-       __u8    ctrl_2;                                 /* control byte #2 */
+       __u8    dsap;                   /* destination service access point */
+       __u8    ssap;                   /* source service access point */
+       __u8    ctrl_1;                 /* control byte #1 */
+       __u8    ctrl_2;                 /* control byte #2 */
 } __attribute__((packed));
 
 /* Define 802.2 SNAP header */
-#define FDDI_K_OUI_LEN 3
 struct fddi_snap_hdr {
-       __u8    dsap;                                   /* always 0xAA */
-       __u8    ssap;                                   /* always 0xAA */
-       __u8    ctrl;                                   /* always 0x03 */
+       __u8    dsap;                   /* always 0xAA */
+       __u8    ssap;                   /* always 0xAA */
+       __u8    ctrl;                   /* always 0x03 */
        __u8    oui[FDDI_K_OUI_LEN];    /* organizational universal id */
-       __be16  ethertype;                              /* packet type ID field */
+       __be16  ethertype;              /* packet type ID field */
 } __attribute__((packed));
 
 /* Define FDDI LLC frame header */
 struct fddihdr {
-       __u8    fc;                                             /* frame control */
-       __u8    daddr[FDDI_K_ALEN];             /* destination address */
-       __u8    saddr[FDDI_K_ALEN];             /* source address */
-       union
-               {
-               struct fddi_8022_1_hdr          llc_8022_1;
-               struct fddi_8022_2_hdr          llc_8022_2;
-               struct fddi_snap_hdr            llc_snap;
-               } hdr;
+       __u8    fc;                     /* frame control */
+       __u8    daddr[FDDI_K_ALEN];     /* destination address */
+       __u8    saddr[FDDI_K_ALEN];     /* source address */
+       union {
+               struct fddi_8022_1_hdr  llc_8022_1;
+               struct fddi_8022_2_hdr  llc_8022_2;
+               struct fddi_snap_hdr    llc_snap;
+       } hdr;
 } __attribute__((packed));
 
 
index bd24470d24a2c7a3145af21094e03f67b7ce0761..f4849525519c7a301f4202f68a8eea9fb0e9d296 100644 (file)
@@ -164,6 +164,7 @@ struct input_keymap_entry {
 #define INPUT_PROP_DIRECT              0x01    /* direct input devices */
 #define INPUT_PROP_BUTTONPAD           0x02    /* has button(s) under pad */
 #define INPUT_PROP_SEMI_MT             0x03    /* touch rectangle only */
+#define INPUT_PROP_TOPBUTTONPAD                0x04    /* softbuttons at top of pad */
 
 #define INPUT_PROP_MAX                 0x1f
 #define INPUT_PROP_CNT                 (INPUT_PROP_MAX + 1)
index c88ccbfda5f1b111a5fa43e1d1803bcccf95b521..2a88f645a5d821c47d7a53a05dc7a0e083a72342 100644 (file)
@@ -211,6 +211,29 @@ enum nft_set_flags {
        NFT_SET_MAP                     = 0x8,
 };
 
+/**
+ * enum nft_set_policies - set selection policy
+ *
+ * @NFT_SET_POL_PERFORMANCE: prefer high performance over low memory use
+ * @NFT_SET_POL_MEMORY: prefer low memory use over high performance
+ */
+enum nft_set_policies {
+       NFT_SET_POL_PERFORMANCE,
+       NFT_SET_POL_MEMORY,
+};
+
+/**
+ * enum nft_set_desc_attributes - set element description
+ *
+ * @NFTA_SET_DESC_SIZE: number of elements in set (NLA_U32)
+ */
+enum nft_set_desc_attributes {
+       NFTA_SET_DESC_UNSPEC,
+       NFTA_SET_DESC_SIZE,
+       __NFTA_SET_DESC_MAX
+};
+#define NFTA_SET_DESC_MAX      (__NFTA_SET_DESC_MAX - 1)
+
 /**
  * enum nft_set_attributes - nf_tables set netlink attributes
  *
@@ -221,6 +244,9 @@ enum nft_set_flags {
  * @NFTA_SET_KEY_LEN: key data length (NLA_U32)
  * @NFTA_SET_DATA_TYPE: mapping data type (NLA_U32)
  * @NFTA_SET_DATA_LEN: mapping data length (NLA_U32)
+ * @NFTA_SET_POLICY: selection policy (NLA_U32)
+ * @NFTA_SET_DESC: set description (NLA_NESTED)
+ * @NFTA_SET_ID: uniquely identifies a set in a transaction (NLA_U32)
  */
 enum nft_set_attributes {
        NFTA_SET_UNSPEC,
@@ -231,6 +257,9 @@ enum nft_set_attributes {
        NFTA_SET_KEY_LEN,
        NFTA_SET_DATA_TYPE,
        NFTA_SET_DATA_LEN,
+       NFTA_SET_POLICY,
+       NFTA_SET_DESC,
+       NFTA_SET_ID,
        __NFTA_SET_MAX
 };
 #define NFTA_SET_MAX           (__NFTA_SET_MAX - 1)
@@ -266,12 +295,14 @@ enum nft_set_elem_attributes {
  * @NFTA_SET_ELEM_LIST_TABLE: table of the set to be changed (NLA_STRING)
  * @NFTA_SET_ELEM_LIST_SET: name of the set to be changed (NLA_STRING)
  * @NFTA_SET_ELEM_LIST_ELEMENTS: list of set elements (NLA_NESTED: nft_set_elem_attributes)
+ * @NFTA_SET_ELEM_LIST_SET_ID: uniquely identifies a set in a transaction (NLA_U32)
  */
 enum nft_set_elem_list_attributes {
        NFTA_SET_ELEM_LIST_UNSPEC,
        NFTA_SET_ELEM_LIST_TABLE,
        NFTA_SET_ELEM_LIST_SET,
        NFTA_SET_ELEM_LIST_ELEMENTS,
+       NFTA_SET_ELEM_LIST_SET_ID,
        __NFTA_SET_ELEM_LIST_MAX
 };
 #define NFTA_SET_ELEM_LIST_MAX (__NFTA_SET_ELEM_LIST_MAX - 1)
@@ -457,12 +488,14 @@ enum nft_cmp_attributes {
  * @NFTA_LOOKUP_SET: name of the set where to look for (NLA_STRING)
  * @NFTA_LOOKUP_SREG: source register of the data to look for (NLA_U32: nft_registers)
  * @NFTA_LOOKUP_DREG: destination register (NLA_U32: nft_registers)
+ * @NFTA_LOOKUP_SET_ID: uniquely identifies a set in a transaction (NLA_U32)
  */
 enum nft_lookup_attributes {
        NFTA_LOOKUP_UNSPEC,
        NFTA_LOOKUP_SET,
        NFTA_LOOKUP_SREG,
        NFTA_LOOKUP_DREG,
+       NFTA_LOOKUP_SET_ID,
        __NFTA_LOOKUP_MAX
 };
 #define NFTA_LOOKUP_MAX                (__NFTA_LOOKUP_MAX - 1)
@@ -536,6 +569,8 @@ enum nft_exthdr_attributes {
  * @NFT_META_SECMARK: packet secmark (skb->secmark)
  * @NFT_META_NFPROTO: netfilter protocol
  * @NFT_META_L4PROTO: layer 4 protocol number
+ * @NFT_META_BRI_IIFNAME: packet input bridge interface name
+ * @NFT_META_BRI_OIFNAME: packet output bridge interface name
  */
 enum nft_meta_keys {
        NFT_META_LEN,
@@ -555,6 +590,8 @@ enum nft_meta_keys {
        NFT_META_SECMARK,
        NFT_META_NFPROTO,
        NFT_META_L4PROTO,
+       NFT_META_BRI_IIFNAME,
+       NFT_META_BRI_OIFNAME,
 };
 
 /**
index 1ba9d626aa833db91c462560f27054b30e91939d..406010d4def049d3880b49a0b4362e75b93c47c1 100644 (file)
@@ -1579,6 +1579,10 @@ enum nl80211_commands {
  * @NL80211_ATTR_TDLS_PEER_CAPABILITY: flags for TDLS peer capabilities, u32.
  *     As specified in the &enum nl80211_tdls_peer_capability.
  *
+ * @NL80211_ATTR_IFACE_SOCKET_OWNER: flag attribute, if set during interface
+ *     creation then the new interface will be owned by the netlink socket
+ *     that created it and will be destroyed when the socket is closed
+ *
  * @NL80211_ATTR_MAX: highest attribute number currently defined
  * @__NL80211_ATTR_AFTER_LAST: internal use
  */
@@ -1914,6 +1918,8 @@ enum nl80211_attrs {
 
        NL80211_ATTR_TDLS_PEER_CAPABILITY,
 
+       NL80211_ATTR_IFACE_SOCKET_OWNER,
+
        /* add attributes here, update the policy in nl80211.c */
 
        __NL80211_ATTR_AFTER_LAST,
@@ -2336,9 +2342,34 @@ enum nl80211_band_attr {
  *     using this channel as the primary or any of the secondary channels
  *     isn't possible
  * @NL80211_FREQUENCY_ATTR_DFS_CAC_TIME: DFS CAC time in milliseconds.
+ * @NL80211_FREQUENCY_ATTR_INDOOR_ONLY: Only indoor use is permitted on this
+ *     channel. A channel that has the INDOOR_ONLY attribute can only be
+ *     used when there is a clear assessment that the device is operating in
+ *     an indoor surroundings, i.e., it is connected to AC power (and not
+ *     through portable DC inverters) or is under the control of a master
+ *     that is acting as an AP and is connected to AC power.
+ * @NL80211_FREQUENCY_ATTR_GO_CONCURRENT: GO operation is allowed on this
+ *     channel if it's connected concurrently to a BSS on the same channel on
+ *     the 2 GHz band or to a channel in the same UNII band (on the 5 GHz
+ *     band), and IEEE80211_CHAN_RADAR is not set. Instantiating a GO on a
+ *     channel that has the GO_CONCURRENT attribute set can be done when there
+ *     is a clear assessment that the device is operating under the guidance of
+ *     an authorized master, i.e., setting up a GO while the device is also
+ *     connected to an AP with DFS and radar detection on the UNII band (it is
+ *     up to user-space, i.e., wpa_supplicant to perform the required
+ *     verifications)
+ * @NL80211_FREQUENCY_ATTR_NO_20MHZ: 20 MHz operation is not allowed
+ *     on this channel in current regulatory domain.
+ * @NL80211_FREQUENCY_ATTR_NO_10MHZ: 10 MHz operation is not allowed
+ *     on this channel in current regulatory domain.
  * @NL80211_FREQUENCY_ATTR_MAX: highest frequency attribute number
  *     currently defined
  * @__NL80211_FREQUENCY_ATTR_AFTER_LAST: internal use
+ *
+ * See https://apps.fcc.gov/eas/comments/GetPublishedDocument.html?id=327&tn=528122
+ * for more information on the FCC description of the relaxations allowed
+ * by NL80211_FREQUENCY_ATTR_INDOOR_ONLY and
+ * NL80211_FREQUENCY_ATTR_GO_CONCURRENT.
  */
 enum nl80211_frequency_attr {
        __NL80211_FREQUENCY_ATTR_INVALID,
@@ -2355,6 +2386,10 @@ enum nl80211_frequency_attr {
        NL80211_FREQUENCY_ATTR_NO_80MHZ,
        NL80211_FREQUENCY_ATTR_NO_160MHZ,
        NL80211_FREQUENCY_ATTR_DFS_CAC_TIME,
+       NL80211_FREQUENCY_ATTR_INDOOR_ONLY,
+       NL80211_FREQUENCY_ATTR_GO_CONCURRENT,
+       NL80211_FREQUENCY_ATTR_NO_20MHZ,
+       NL80211_FREQUENCY_ATTR_NO_10MHZ,
 
        /* keep last */
        __NL80211_FREQUENCY_ATTR_AFTER_LAST,
@@ -2573,10 +2608,13 @@ enum nl80211_dfs_regions {
  *     present has been registered with the wireless core that
  *     has listed NL80211_FEATURE_CELL_BASE_REG_HINTS as a
  *     supported feature.
 + * @NL80211_USER_REG_HINT_INDOOR: a user sent a hint indicating that the
+ *     platform is operating in an indoor environment.
  */
 enum nl80211_user_reg_hint_type {
        NL80211_USER_REG_HINT_USER      = 0,
        NL80211_USER_REG_HINT_CELL_BASE = 1,
+       NL80211_USER_REG_HINT_INDOOR    = 2,
 };
 
 /**
@@ -3891,6 +3929,9 @@ enum nl80211_ap_sme_features {
  *     interface. An active monitor interface behaves like a normal monitor
  *     interface, but gets added to the driver. It ensures that incoming
  *     unicast packets directed at the configured interface address get ACKed.
+ * @NL80211_FEATURE_AP_MODE_CHAN_WIDTH_CHANGE: This driver supports dynamic
+ *     channel bandwidth change (e.g., HT 20 <-> 40 MHz channel) during the
+ *     lifetime of a BSS.
  */
 enum nl80211_feature_flags {
        NL80211_FEATURE_SK_TX_STATUS                    = 1 << 0,
@@ -3911,6 +3952,7 @@ enum nl80211_feature_flags {
        NL80211_FEATURE_FULL_AP_CLIENT_STATE            = 1 << 15,
        NL80211_FEATURE_USERSPACE_MPM                   = 1 << 16,
        NL80211_FEATURE_ACTIVE_MONITOR                  = 1 << 17,
+       NL80211_FEATURE_AP_MODE_CHAN_WIDTH_CHANGE       = 1 << 18,
 };
 
 /**
index 852373d27dbb2bdd2016bf6a9d2ba00cd68e2954..6f71b9b4159581eac01241f9c443948584f30f6e 100644 (file)
@@ -38,6 +38,7 @@
 #define _LINUX_TIPC_H_
 
 #include <linux/types.h>
+#include <linux/sockios.h>
 
 /*
  * TIPC addressing primitives
@@ -87,6 +88,7 @@ static inline unsigned int tipc_node(__u32 addr)
 
 #define TIPC_CFG_SRV           0       /* configuration service name type */
 #define TIPC_TOP_SRV           1       /* topology service name type */
+#define TIPC_LINK_STATE                2       /* link state name type */
 #define TIPC_RESERVED_TYPES    64      /* lowest user-publishable name type */
 
 /*
@@ -206,4 +208,25 @@ struct sockaddr_tipc {
 #define TIPC_NODE_RECVQ_DEPTH  131     /* Default: none (read only) */
 #define TIPC_SOCK_RECVQ_DEPTH  132     /* Default: none (read only) */
 
+/*
 + * Maximum sizes of TIPC bearer-related names (including terminating NUL)
+ * The string formatting for each name element is:
+ * media: media
+ * interface: media:interface name
+ * link: Z.C.N:interface-Z.C.N:interface
+ *
+ */
+
+#define TIPC_MAX_MEDIA_NAME    16
+#define TIPC_MAX_IF_NAME       16
+#define TIPC_MAX_BEARER_NAME   32
+#define TIPC_MAX_LINK_NAME     60
+
+#define SIOCGETLINKNAME                SIOCPROTOPRIVATE
+
+struct tipc_sioc_ln_req {
+       __u32 peer;
+       __u32 bearer_id;
+       char linkname[TIPC_MAX_LINK_NAME];
+};
 #endif
index 6b0bff09b3a7ced5dc7cf2c1a07dd4f82112b088..41a76acbb305f85cb4cb0ec6dfab9cab1e20e1d4 100644 (file)
@@ -39,6 +39,7 @@
 
 #include <linux/types.h>
 #include <linux/string.h>
+#include <linux/tipc.h>
 #include <asm/byteorder.h>
 
 #ifndef __KERNEL__
 #define TIPC_TLV_NAME_TBL_QUERY        25      /* struct tipc_name_table_query */
 #define TIPC_TLV_PORT_REF      26      /* 32-bit port reference */
 
-/*
- * Maximum sizes of TIPC bearer-related names (including terminating NUL)
- */
-
-#define TIPC_MAX_MEDIA_NAME    16      /* format = media */
-#define TIPC_MAX_IF_NAME       16      /* format = interface */
-#define TIPC_MAX_BEARER_NAME   32      /* format = media:interface */
-#define TIPC_MAX_LINK_NAME     60      /* format = Z.C.N:interface-Z.C.N:interface */
-
 /*
  * Link priority limits (min, default, max, media default)
  */
index 7c2893602d0651f767e1a177dbfd6214e66e8d9c..81f5f49479da7c64625007a0eb086f720959f728 100644 (file)
@@ -423,6 +423,38 @@ static void kauditd_send_skb(struct sk_buff *skb)
                consume_skb(skb);
 }
 
+/*
+ * kauditd_send_multicast_skb - send the skb to multicast userspace listeners
+ *
+ * This function doesn't consume an skb as might be expected since it has to
 + * copy it anyway.
+ */
+static void kauditd_send_multicast_skb(struct sk_buff *skb)
+{
+       struct sk_buff          *copy;
+       struct audit_net        *aunet = net_generic(&init_net, audit_net_id);
+       struct sock             *sock = aunet->nlsk;
+
+       if (!netlink_has_listeners(sock, AUDIT_NLGRP_READLOG))
+               return;
+
+       /*
+        * The seemingly wasteful skb_copy() rather than bumping the refcount
+        * using skb_get() is necessary because non-standard mods are made to
+        * the skb by the original kaudit unicast socket send routine.  The
+        * existing auditd daemon assumes this breakage.  Fixing this would
+        * require co-ordinating a change in the established protocol between
+        * the kaudit kernel subsystem and the auditd userspace code.  There is
+        * no reason for new multicast clients to continue with this
+        * non-compliance.
+        */
+       copy = skb_copy(skb, GFP_KERNEL);
+       if (!copy)
+               return;
+
+       nlmsg_multicast(sock, copy, 0, AUDIT_NLGRP_READLOG, GFP_KERNEL);
+}
+
 /*
  * flush_hold_queue - empty the hold queue if auditd appears
  *
@@ -643,13 +675,13 @@ static int audit_netlink_ok(struct sk_buff *skb, u16 msg_type)
                if ((task_active_pid_ns(current) != &init_pid_ns))
                        return -EPERM;
 
-               if (!capable(CAP_AUDIT_CONTROL))
+               if (!netlink_capable(skb, CAP_AUDIT_CONTROL))
                        err = -EPERM;
                break;
        case AUDIT_USER:
        case AUDIT_FIRST_USER_MSG ... AUDIT_LAST_USER_MSG:
        case AUDIT_FIRST_USER_MSG2 ... AUDIT_LAST_USER_MSG2:
-               if (!capable(CAP_AUDIT_WRITE))
+               if (!netlink_capable(skb, CAP_AUDIT_WRITE))
                        err = -EPERM;
                break;
        default:  /* bad msg */
@@ -1076,10 +1108,22 @@ static void audit_receive(struct sk_buff  *skb)
        mutex_unlock(&audit_cmd_mutex);
 }
 
+/* Run custom bind function on netlink socket group connect or bind requests. */
+static int audit_bind(int group)
+{
+       if (!capable(CAP_AUDIT_READ))
+               return -EPERM;
+
+       return 0;
+}
+
 static int __net_init audit_net_init(struct net *net)
 {
        struct netlink_kernel_cfg cfg = {
                .input  = audit_receive,
+               .bind   = audit_bind,
+               .flags  = NL_CFG_F_NONROOT_RECV,
+               .groups = AUDIT_NLGRP_MAX,
        };
 
        struct audit_net *aunet = net_generic(net, audit_net_id);
@@ -1901,10 +1945,10 @@ out:
  * audit_log_end - end one audit record
  * @ab: the audit_buffer
  *
- * The netlink_* functions cannot be called inside an irq context, so
- * the audit buffer is placed on a queue and a tasklet is scheduled to
- * remove them from the queue outside the irq context.  May be called in
- * any context.
+ * netlink_unicast() cannot be called inside an irq context because it blocks
+ * (last arg, flags, is not set to MSG_DONTWAIT), so the audit buffer is placed
+ * on a queue and a tasklet is scheduled to remove them from the queue outside
+ * the irq context.  May be called in any context.
  */
 void audit_log_end(struct audit_buffer *ab)
 {
@@ -1914,6 +1958,18 @@ void audit_log_end(struct audit_buffer *ab)
                audit_log_lost("rate limit exceeded");
        } else {
                struct nlmsghdr *nlh = nlmsg_hdr(ab->skb);
+
+               kauditd_send_multicast_skb(ab->skb);
+
+               /*
+                * The original kaudit unicast socket sends up messages with
+                * nlmsg_len set to the payload length rather than the entire
+                * message length.  This breaks the standard set by netlink.
+                * The existing auditd daemon assumes this breakage.  Fixing
+                * this would require co-ordinating a change in the established
+                * protocol between the kaudit kernel subsystem and the auditd
+                * userspace code.
+                */
                nlh->nlmsg_len = ab->skb->len - NLMSG_HDRLEN;
 
                if (audit_pid) {
index d55092ceee2975c204bcb90e856f9b6504d577ac..6b715c0af1b117b5b61bd32629a00845f0313557 100644 (file)
@@ -234,6 +234,11 @@ again:
                        goto again;
                }
                timer->base = new_base;
+       } else {
+               if (cpu != this_cpu && hrtimer_check_target(timer, new_base)) {
+                       cpu = this_cpu;
+                       goto again;
+               }
        }
        return new_base;
 }
@@ -569,6 +574,23 @@ hrtimer_force_reprogram(struct hrtimer_cpu_base *cpu_base, int skip_equal)
 
        cpu_base->expires_next.tv64 = expires_next.tv64;
 
+       /*
+        * If a hang was detected in the last timer interrupt then we
+        * leave the hang delay active in the hardware. We want the
+        * system to make progress. That also prevents the following
+        * scenario:
+        * T1 expires 50ms from now
+        * T2 expires 5s from now
+        *
+        * T1 is removed, so this code is called and would reprogram
+        * the hardware to 5s from now. Any hrtimer_start after that
+        * will not reprogram the hardware due to hang_detected being
 +        * set. So we'd effectively block all timers until the T2 event
+        * fires.
+        */
+       if (cpu_base->hang_detected)
+               return;
+
        if (cpu_base->expires_next.tv64 != KTIME_MAX)
                tick_program_event(cpu_base->expires_next, 1);
 }
index a7174617616ba6b8f404a1c3f01cf8b7dd90cb4d..bb07f2928f4b9c2ca33803f712c8889ca5823907 100644 (file)
@@ -363,6 +363,13 @@ __irq_alloc_descs(int irq, unsigned int from, unsigned int cnt, int node,
                if (from > irq)
                        return -EINVAL;
                from = irq;
+       } else {
+               /*
+                * For interrupts which are freely allocated the
+                * architecture can force a lower bound to the @from
+                * argument. x86 uses this to exclude the GSI space.
+                */
+               from = arch_dynirq_lower_bound(from);
        }
 
        mutex_lock(&sparse_irq_lock);
index 2486a4c1a710ba057c7f884faae19bff1fc6d31c..d34131ca372baee79aa17ca2670cde5ff32a2cc8 100644 (file)
@@ -180,7 +180,7 @@ int irq_do_set_affinity(struct irq_data *data, const struct cpumask *mask,
        struct irq_chip *chip = irq_data_get_irq_chip(data);
        int ret;
 
-       ret = chip->irq_set_affinity(data, mask, false);
+       ret = chip->irq_set_affinity(data, mask, force);
        switch (ret) {
        case IRQ_SET_MASK_OK:
                cpumask_copy(data->affinity, mask);
@@ -192,7 +192,8 @@ int irq_do_set_affinity(struct irq_data *data, const struct cpumask *mask,
        return ret;
 }
 
-int __irq_set_affinity_locked(struct irq_data *data, const struct cpumask *mask)
+int irq_set_affinity_locked(struct irq_data *data, const struct cpumask *mask,
+                           bool force)
 {
        struct irq_chip *chip = irq_data_get_irq_chip(data);
        struct irq_desc *desc = irq_data_to_desc(data);
@@ -202,7 +203,7 @@ int __irq_set_affinity_locked(struct irq_data *data, const struct cpumask *mask)
                return -EINVAL;
 
        if (irq_can_move_pcntxt(data)) {
-               ret = irq_do_set_affinity(data, mask, false);
+               ret = irq_do_set_affinity(data, mask, force);
        } else {
                irqd_set_move_pending(data);
                irq_copy_pending(desc, mask);
@@ -217,13 +218,7 @@ int __irq_set_affinity_locked(struct irq_data *data, const struct cpumask *mask)
        return ret;
 }
 
-/**
- *     irq_set_affinity - Set the irq affinity of a given irq
- *     @irq:           Interrupt to set affinity
- *     @mask:          cpumask
- *
- */
-int irq_set_affinity(unsigned int irq, const struct cpumask *mask)
+int __irq_set_affinity(unsigned int irq, const struct cpumask *mask, bool force)
 {
        struct irq_desc *desc = irq_to_desc(irq);
        unsigned long flags;
@@ -233,7 +228,7 @@ int irq_set_affinity(unsigned int irq, const struct cpumask *mask)
                return -EINVAL;
 
        raw_spin_lock_irqsave(&desc->lock, flags);
-       ret =  __irq_set_affinity_locked(irq_desc_get_irq_data(desc), mask);
+       ret = irq_set_affinity_locked(irq_desc_get_irq_data(desc), mask, force);
        raw_spin_unlock_irqrestore(&desc->lock, flags);
        return ret;
 }
index 11869408f79b86abe33e5194d0f5c705b44e9d81..079c4615607d6ed266330a5416529bfcc37e4db0 100644 (file)
@@ -815,9 +815,6 @@ SYSCALL_DEFINE2(delete_module, const char __user *, name_user,
                return -EFAULT;
        name[MODULE_NAME_LEN-1] = '\0';
 
-       if (!(flags & O_NONBLOCK))
-               pr_warn("waiting module removal not supported: please upgrade\n");
-
        if (mutex_lock_interruptible(&module_mutex) != 0)
                return -EINTR;
 
@@ -3271,6 +3268,9 @@ static int load_module(struct load_info *info, const char __user *uargs,
 
        dynamic_debug_setup(info->debug, info->num_debug);
 
+       /* Ftrace init must be called in the MODULE_STATE_UNFORMED state */
+       ftrace_module_init(mod);
+
        /* Finally it's fully formed, ready to start executing. */
        err = complete_formation(mod, info);
        if (err)
index c3ad9cafe930e550a6400dc1994f8ee86570d885..8233cd4047d776c311ef71800479f1e2b637e5da 100644 (file)
@@ -14,6 +14,7 @@
 #include <linux/init.h>
 #include <linux/console.h>
 #include <linux/cpu.h>
+#include <linux/cpuidle.h>
 #include <linux/syscalls.h>
 #include <linux/gfp.h>
 #include <linux/io.h>
@@ -53,7 +54,9 @@ static void freeze_begin(void)
 
 static void freeze_enter(void)
 {
+       cpuidle_resume();
        wait_event(suspend_freeze_wait_head, suspend_freeze_wake);
+       cpuidle_pause();
 }
 
 void freeze_wake(void)
index b35c21503a36d6e63160f7f46a2eb7bf59d371ce..1036b6f2fdedaaa905284ecf7c46a8ec4286b05e 100644 (file)
@@ -54,8 +54,7 @@
 struct seccomp_filter {
        atomic_t usage;
        struct seccomp_filter *prev;
-       unsigned short len;  /* Instruction count */
-       struct sock_filter_int insnsi[];
+       struct sk_filter *prog;
 };
 
 /* Limit any path through the tree to 256KB worth of instructions. */
@@ -189,7 +188,8 @@ static u32 seccomp_run_filters(int syscall)
         * value always takes priority (ignoring the DATA).
         */
        for (f = current->seccomp.filter; f; f = f->prev) {
-               u32 cur_ret = sk_run_filter_int_seccomp(&sd, f->insnsi);
+               u32 cur_ret = SK_RUN_FILTER(f->prog, (void *)&sd);
+
                if ((cur_ret & SECCOMP_RET_ACTION) < (ret & SECCOMP_RET_ACTION))
                        ret = cur_ret;
        }
@@ -215,7 +215,7 @@ static long seccomp_attach_filter(struct sock_fprog *fprog)
                return -EINVAL;
 
        for (filter = current->seccomp.filter; filter; filter = filter->prev)
-               total_insns += filter->len + 4;  /* include a 4 instr penalty */
+               total_insns += filter->prog->len + 4;  /* include a 4 instr penalty */
        if (total_insns > MAX_INSNS_PER_PATH)
                return -ENOMEM;
 
@@ -256,19 +256,25 @@ static long seccomp_attach_filter(struct sock_fprog *fprog)
 
        /* Allocate a new seccomp_filter */
        ret = -ENOMEM;
-       filter = kzalloc(sizeof(struct seccomp_filter) +
-                        sizeof(struct sock_filter_int) * new_len,
+       filter = kzalloc(sizeof(struct seccomp_filter),
                         GFP_KERNEL|__GFP_NOWARN);
        if (!filter)
                goto free_prog;
 
-       ret = sk_convert_filter(fp, fprog->len, filter->insnsi, &new_len);
-       if (ret)
+       filter->prog = kzalloc(sk_filter_size(new_len),
+                              GFP_KERNEL|__GFP_NOWARN);
+       if (!filter->prog)
                goto free_filter;
+
+       ret = sk_convert_filter(fp, fprog->len, filter->prog->insnsi, &new_len);
+       if (ret)
+               goto free_filter_prog;
        kfree(fp);
 
        atomic_set(&filter->usage, 1);
-       filter->len = new_len;
+       filter->prog->len = new_len;
+
+       sk_filter_select_runtime(filter->prog);
 
        /*
         * If there is an existing filter, make it the prev and don't drop its
@@ -278,6 +284,8 @@ static long seccomp_attach_filter(struct sock_fprog *fprog)
        current->seccomp.filter = filter;
        return 0;
 
+free_filter_prog:
+       kfree(filter->prog);
 free_filter:
        kfree(filter);
 free_prog:
@@ -330,6 +338,7 @@ void put_seccomp_filter(struct task_struct *tsk)
        while (orig && atomic_dec_and_test(&orig->usage)) {
                struct seccomp_filter *freeme = orig;
                orig = orig->prev;
+               sk_filter_free(freeme->prog);
                kfree(freeme);
        }
 }
index b50990a5bea0220df9034f0bcc71d92e452edc78..33e4648ae0e7cd908671ef1a8ab60bbb562c097c 100644 (file)
@@ -779,3 +779,8 @@ int __init __weak arch_early_irq_init(void)
 {
        return 0;
 }
+
+unsigned int __weak arch_dynirq_lower_bound(unsigned int from)
+{
+       return from;
+}
index 74f5b580fe34904fa4d9bcb2790545a323367773..e36ae4b15726041337e74b26b0a09dcec6ff73b8 100644 (file)
@@ -2501,11 +2501,11 @@ int proc_do_large_bitmap(struct ctl_table *table, int write,
        bool first = 1;
        size_t left = *lenp;
        unsigned long bitmap_len = table->maxlen;
-       unsigned long *bitmap = (unsigned long *) table->data;
+       unsigned long *bitmap = *(unsigned long **) table->data;
        unsigned long *tmp_bitmap = NULL;
        char tr_a[] = { '-', ',', '\n' }, tr_b[] = { ',', '\n', 0 }, c;
 
-       if (!bitmap_len || !left || (*ppos && !write)) {
+       if (!bitmap || !bitmap_len || !left || (*ppos && !write)) {
                *lenp = 0;
                return 0;
        }
index 87bd529879c23bb12705fa0144cff354064f91dc..3bb01a323b2a3e0ae9291271f4dc0322f01bfd80 100644 (file)
@@ -838,7 +838,7 @@ unsigned long apply_slack(struct timer_list *timer, unsigned long expires)
 
        bit = find_last_bit(&mask, BITS_PER_LONG);
 
-       mask = (1 << bit) - 1;
+       mask = (1UL << bit) - 1;
 
        expires_limit = expires_limit & ~(mask);
 
index 1fd4b9479210183762293944be777abb5435f8e3..4a54a25afa2fe67165cb6f65cd0b63c42eb724eb 100644 (file)
@@ -4330,16 +4330,11 @@ static void ftrace_init_module(struct module *mod,
        ftrace_process_locs(mod, start, end);
 }
 
-static int ftrace_module_notify_enter(struct notifier_block *self,
-                                     unsigned long val, void *data)
+void ftrace_module_init(struct module *mod)
 {
-       struct module *mod = data;
-
-       if (val == MODULE_STATE_COMING)
-               ftrace_init_module(mod, mod->ftrace_callsites,
-                                  mod->ftrace_callsites +
-                                  mod->num_ftrace_callsites);
-       return 0;
+       ftrace_init_module(mod, mod->ftrace_callsites,
+                          mod->ftrace_callsites +
+                          mod->num_ftrace_callsites);
 }
 
 static int ftrace_module_notify_exit(struct notifier_block *self,
@@ -4353,11 +4348,6 @@ static int ftrace_module_notify_exit(struct notifier_block *self,
        return 0;
 }
 #else
-static int ftrace_module_notify_enter(struct notifier_block *self,
-                                     unsigned long val, void *data)
-{
-       return 0;
-}
 static int ftrace_module_notify_exit(struct notifier_block *self,
                                     unsigned long val, void *data)
 {
@@ -4365,11 +4355,6 @@ static int ftrace_module_notify_exit(struct notifier_block *self,
 }
 #endif /* CONFIG_MODULES */
 
-struct notifier_block ftrace_module_enter_nb = {
-       .notifier_call = ftrace_module_notify_enter,
-       .priority = INT_MAX,    /* Run before anything that can use kprobes */
-};
-
 struct notifier_block ftrace_module_exit_nb = {
        .notifier_call = ftrace_module_notify_exit,
        .priority = INT_MIN,    /* Run after anything that can remove kprobes */
@@ -4403,10 +4388,6 @@ void __init ftrace_init(void)
                                  __start_mcount_loc,
                                  __stop_mcount_loc);
 
-       ret = register_module_notifier(&ftrace_module_enter_nb);
-       if (ret)
-               pr_warning("Failed to register trace ftrace module enter notifier\n");
-
        ret = register_module_notifier(&ftrace_module_exit_nb);
        if (ret)
                pr_warning("Failed to register trace ftrace module exit notifier\n");
index 925f537f07d17db7caae363dd39a20bd2296d2ee..4747b476a0300bc3c08ad82f97d6a7a10fb638c4 100644 (file)
@@ -77,7 +77,7 @@ event_triggers_call(struct ftrace_event_file *file, void *rec)
                        data->ops->func(data);
                        continue;
                }
-               filter = rcu_dereference(data->filter);
+               filter = rcu_dereference_sched(data->filter);
                if (filter && !filter_match_preds(filter, rec))
                        continue;
                if (data->cmd_ops->post_trigger) {
index 819ac51202c01006e105f91355d492db6bdd6eb6..d1b7bdfb8f8e174bd584a6ad616d881fcc6635b1 100644 (file)
@@ -1620,6 +1620,19 @@ config TEST_USER_COPY
 
          If unsure, say N.
 
+config TEST_BPF
+       tristate "Test BPF filter functionality"
+       default n
+       depends on m && NET
+       help
+         This builds the "test_bpf" module that runs various test vectors
+         against the BPF interpreter or BPF JIT compiler depending on the
+         current setting. This is in particular useful for BPF JIT compiler
+         development, but also to run regression tests against changes in
+         the interpreter code.
+
+         If unsure, say N.
+
 source "samples/Kconfig"
 
 source "lib/Kconfig.kgdb"
index 0cd7b68e1382dee93301898da70a6ca51c3d764e..b2be1ef1e8ece2939550b3ffb029d5bbba24dac4 100644 (file)
@@ -33,6 +33,7 @@ obj-y += kstrtox.o
 obj-$(CONFIG_TEST_KSTRTOX) += test-kstrtox.o
 obj-$(CONFIG_TEST_MODULE) += test_module.o
 obj-$(CONFIG_TEST_USER_COPY) += test_user_copy.o
+obj-$(CONFIG_TEST_BPF) += test_bpf.o
 
 ifeq ($(CONFIG_DEBUG_KOBJECT),y)
 CFLAGS_kobject.o += -DDEBUG
diff --git a/lib/test_bpf.c b/lib/test_bpf.c
new file mode 100644 (file)
index 0000000..e160934
--- /dev/null
@@ -0,0 +1,1546 @@
+/*
+ * Testsuite for BPF interpreter and BPF JIT compiler
+ *
+ * Copyright (c) 2011-2014 PLUMgrid, http://plumgrid.com
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of version 2 of the GNU General Public
+ * License as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ */
+
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
+#include <linux/init.h>
+#include <linux/module.h>
+#include <linux/filter.h>
+#include <linux/skbuff.h>
+#include <linux/netdevice.h>
+#include <linux/if_vlan.h>
+
+#define MAX_SUBTESTS   3
+#define MAX_DATA       128
+#define MAX_INSNS      512
+#define MAX_K          0xffffFFFF
+
+/* define few constants used to init test 'skb' */
+#define SKB_TYPE       3
+#define SKB_MARK       0x1234aaaa
+#define SKB_HASH       0x1234aaab
+#define SKB_QUEUE_MAP  123
+#define SKB_VLAN_TCI   0xffff
+#define SKB_DEV_IFINDEX        577
+#define SKB_DEV_TYPE   588
+
+/* redefine REGs to make tests less verbose */
+#define R0 BPF_REG_0
+#define R1 BPF_REG_1
+#define R2 BPF_REG_2
+#define R3 BPF_REG_3
+#define R4 BPF_REG_4
+#define R5 BPF_REG_5
+#define R6 BPF_REG_6
+#define R7 BPF_REG_7
+#define R8 BPF_REG_8
+#define R9 BPF_REG_9
+#define R10 BPF_REG_10
+
+struct bpf_test {
+       const char *descr;
+       union {
+               struct sock_filter insns[MAX_INSNS];
+               struct sock_filter_int insns_int[MAX_INSNS];
+       };
+       enum {
+               NO_DATA,
+               EXPECTED_FAIL,
+               SKB,
+               SKB_INT
+       } data_type;
+       __u8 data[MAX_DATA];
+       struct {
+               int data_size;
+               __u32 result;
+       } test[MAX_SUBTESTS];
+};
+
+static struct bpf_test tests[] = {
+       {
+               "TAX",
+               .insns = {
+                       BPF_STMT(BPF_LD | BPF_IMM, 1),
+                       BPF_STMT(BPF_MISC | BPF_TAX, 0),
+                       BPF_STMT(BPF_LD | BPF_IMM, 2),
+                       BPF_STMT(BPF_ALU | BPF_ADD | BPF_X, 0),
+                       BPF_STMT(BPF_ALU | BPF_NEG, 0), /* A == -3 */
+                       BPF_STMT(BPF_MISC | BPF_TAX, 0),
+                       BPF_STMT(BPF_LD | BPF_LEN, 0),
+                       BPF_STMT(BPF_ALU | BPF_ADD | BPF_X, 0),
+                       BPF_STMT(BPF_MISC | BPF_TAX, 0), /* X == len - 3 */
+                       BPF_STMT(BPF_LD | BPF_B | BPF_IND, 1),
+                       BPF_STMT(BPF_RET | BPF_A, 0)
+               },
+               SKB,
+               { 10, 20, 30, 40, 50 },
+               { { 2, 10 }, { 3, 20 }, { 4, 30 } },
+       },
+       {
+               "TXA",
+               .insns = {
+                       BPF_STMT(BPF_LDX | BPF_LEN, 0),
+                       BPF_STMT(BPF_MISC | BPF_TXA, 0),
+                       BPF_STMT(BPF_ALU | BPF_ADD | BPF_X, 0),
+                       BPF_STMT(BPF_RET | BPF_A, 0) /* A == len * 2 */
+               },
+               SKB,
+               { 10, 20, 30, 40, 50 },
+               { { 1, 2 }, { 3, 6 }, { 4, 8 } },
+       },
+       {
+               "ADD_SUB_MUL_K",
+               .insns = {
+                       BPF_STMT(BPF_LD | BPF_IMM, 1),
+                       BPF_STMT(BPF_ALU | BPF_ADD | BPF_K, 2),
+                       BPF_STMT(BPF_LDX | BPF_IMM, 3),
+                       BPF_STMT(BPF_ALU | BPF_SUB | BPF_X, 0),
+                       BPF_STMT(BPF_ALU | BPF_ADD | BPF_K, 0xffffffff),
+                       BPF_STMT(BPF_ALU | BPF_MUL | BPF_K, 3),
+                       BPF_STMT(BPF_RET | BPF_A, 0)
+               },
+               0,
+               { },
+               { { 0, 0xfffffffd } }
+       },
+       {
+               "DIV_KX",
+               .insns = {
+                       BPF_STMT(BPF_LD | BPF_IMM, 8),
+                       BPF_STMT(BPF_ALU | BPF_DIV | BPF_K, 2),
+                       BPF_STMT(BPF_MISC | BPF_TAX, 0),
+                       BPF_STMT(BPF_LD | BPF_IMM, 0xffffffff),
+                       BPF_STMT(BPF_ALU | BPF_DIV | BPF_X, 0),
+                       BPF_STMT(BPF_MISC | BPF_TAX, 0),
+                       BPF_STMT(BPF_LD | BPF_IMM, 0xffffffff),
+                       BPF_STMT(BPF_ALU | BPF_DIV | BPF_K, 0x70000000),
+                       BPF_STMT(BPF_ALU | BPF_ADD | BPF_X, 0),
+                       BPF_STMT(BPF_RET | BPF_A, 0)
+               },
+               0,
+               { },
+               { { 0, 0x40000001 } }
+       },
+       {
+               "AND_OR_LSH_K",
+               .insns = {
+                       BPF_STMT(BPF_LD | BPF_IMM, 0xff),
+                       BPF_STMT(BPF_ALU | BPF_AND | BPF_K, 0xf0),
+                       BPF_STMT(BPF_ALU | BPF_LSH | BPF_K, 27),
+                       BPF_STMT(BPF_MISC | BPF_TAX, 0),
+                       BPF_STMT(BPF_LD | BPF_IMM, 0xf),
+                       BPF_STMT(BPF_ALU | BPF_OR | BPF_K, 0xf0),
+                       BPF_STMT(BPF_ALU | BPF_ADD | BPF_X, 0),
+                       BPF_STMT(BPF_RET | BPF_A, 0)
+               },
+               0,
+               { },
+               { { 0, 0x800000ff }, { 1, 0x800000ff } },
+       },
+       {
+               "LD_IND",
+               .insns = {
+                       BPF_STMT(BPF_LDX | BPF_LEN, 0),
+                       BPF_STMT(BPF_LD | BPF_H | BPF_IND, MAX_K),
+                       BPF_STMT(BPF_RET | BPF_K, 1)
+               },
+               SKB,
+               { },
+               { { 1, 0 }, { 10, 0 }, { 60, 0 } },
+       },
+       {
+               "LD_ABS",
+               .insns = {
+                       BPF_STMT(BPF_LD | BPF_W | BPF_ABS, 1000),
+                       BPF_STMT(BPF_RET | BPF_K, 1)
+               },
+               SKB,
+               { },
+               { { 1, 0 }, { 10, 0 }, { 60, 0 } },
+       },
+       {
+               "LD_ABS_LL",
+               .insns = {
+                       BPF_STMT(BPF_LD | BPF_B | BPF_ABS, SKF_LL_OFF),
+                       BPF_STMT(BPF_MISC | BPF_TAX, 0),
+                       BPF_STMT(BPF_LD | BPF_B | BPF_ABS, SKF_LL_OFF + 1),
+                       BPF_STMT(BPF_ALU | BPF_ADD | BPF_X, 0),
+                       BPF_STMT(BPF_RET | BPF_A, 0)
+               },
+               SKB,
+               { 1, 2, 3 },
+               { { 1, 0 }, { 2, 3 } },
+       },
+       {
+               "LD_IND_LL",
+               .insns = {
+                       BPF_STMT(BPF_LD | BPF_IMM, SKF_LL_OFF - 1),
+                       BPF_STMT(BPF_LDX | BPF_LEN, 0),
+                       BPF_STMT(BPF_ALU | BPF_ADD | BPF_X, 0),
+                       BPF_STMT(BPF_MISC | BPF_TAX, 0),
+                       BPF_STMT(BPF_LD | BPF_B | BPF_IND, 0),
+                       BPF_STMT(BPF_RET | BPF_A, 0)
+               },
+               SKB,
+               { 1, 2, 3, 0xff },
+               { { 1, 1 }, { 3, 3 }, { 4, 0xff } },
+       },
+       {
+               "LD_ABS_NET",
+               .insns = {
+                       BPF_STMT(BPF_LD | BPF_B | BPF_ABS, SKF_NET_OFF),
+                       BPF_STMT(BPF_MISC | BPF_TAX, 0),
+                       BPF_STMT(BPF_LD | BPF_B | BPF_ABS, SKF_NET_OFF + 1),
+                       BPF_STMT(BPF_ALU | BPF_ADD | BPF_X, 0),
+                       BPF_STMT(BPF_RET | BPF_A, 0)
+               },
+               SKB,
+               { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 2, 3 },
+               { { 15, 0 }, { 16, 3 } },
+       },
+       {
+               "LD_IND_NET",
+               .insns = {
+                       BPF_STMT(BPF_LD | BPF_IMM, SKF_NET_OFF - 15),
+                       BPF_STMT(BPF_LDX | BPF_LEN, 0),
+                       BPF_STMT(BPF_ALU | BPF_ADD | BPF_X, 0),
+                       BPF_STMT(BPF_MISC | BPF_TAX, 0),
+                       BPF_STMT(BPF_LD | BPF_B | BPF_IND, 0),
+                       BPF_STMT(BPF_RET | BPF_A, 0)
+               },
+               SKB,
+               { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 2, 3 },
+               { { 14, 0 }, { 15, 1 }, { 17, 3 } },
+       },
+       {
+               "LD_PKTTYPE",
+               .insns = {
+                       BPF_STMT(BPF_LD | BPF_W | BPF_ABS,
+                                SKF_AD_OFF + SKF_AD_PKTTYPE),
+                       BPF_JUMP(BPF_JMP | BPF_JEQ | BPF_K, SKB_TYPE, 1, 0),
+                       BPF_STMT(BPF_RET | BPF_K, 1),
+                       BPF_STMT(BPF_LD | BPF_W | BPF_ABS,
+                                SKF_AD_OFF + SKF_AD_PKTTYPE),
+                       BPF_JUMP(BPF_JMP | BPF_JEQ | BPF_K, SKB_TYPE, 1, 0),
+                       BPF_STMT(BPF_RET | BPF_K, 1),
+                       BPF_STMT(BPF_LD | BPF_W | BPF_ABS,
+                                SKF_AD_OFF + SKF_AD_PKTTYPE),
+                       BPF_JUMP(BPF_JMP | BPF_JEQ | BPF_K, SKB_TYPE, 1, 0),
+                       BPF_STMT(BPF_RET | BPF_K, 1),
+                       BPF_STMT(BPF_RET | BPF_A, 0)
+               },
+               SKB,
+               { },
+               { { 1, 3 }, { 10, 3 } },
+       },
+       {
+               "LD_MARK",
+               .insns = {
+                       BPF_STMT(BPF_LD | BPF_W | BPF_ABS,
+                                SKF_AD_OFF + SKF_AD_MARK),
+                       BPF_STMT(BPF_RET | BPF_A, 0)
+               },
+               SKB,
+               { },
+               { { 1, SKB_MARK}, { 10, SKB_MARK} },
+       },
+       {
+               "LD_RXHASH",
+               .insns = {
+                       BPF_STMT(BPF_LD | BPF_W | BPF_ABS,
+                                SKF_AD_OFF + SKF_AD_RXHASH),
+                       BPF_STMT(BPF_RET | BPF_A, 0)
+               },
+               SKB,
+               { },
+               { { 1, SKB_HASH}, { 10, SKB_HASH} },
+       },
+       {
+               "LD_QUEUE",
+               .insns = {
+                       BPF_STMT(BPF_LD | BPF_W | BPF_ABS,
+                                SKF_AD_OFF + SKF_AD_QUEUE),
+                       BPF_STMT(BPF_RET | BPF_A, 0)
+               },
+               SKB,
+               { },
+               { { 1, SKB_QUEUE_MAP }, { 10, SKB_QUEUE_MAP } },
+       },
+       {
+               "LD_PROTOCOL",
+               .insns = {
+                       BPF_STMT(BPF_LD | BPF_B | BPF_ABS, 1),
+                       BPF_JUMP(BPF_JMP | BPF_JEQ | BPF_K, 20, 1, 0),
+                       BPF_STMT(BPF_RET | BPF_K, 0),
+                       BPF_STMT(BPF_LD | BPF_W | BPF_ABS,
+                                SKF_AD_OFF + SKF_AD_PROTOCOL),
+                       BPF_STMT(BPF_MISC | BPF_TAX, 0),
+                       BPF_STMT(BPF_LD | BPF_B | BPF_ABS, 2),
+                       BPF_JUMP(BPF_JMP | BPF_JEQ | BPF_K, 30, 1, 0),
+                       BPF_STMT(BPF_RET | BPF_K, 0),
+                       BPF_STMT(BPF_MISC | BPF_TXA, 0),
+                       BPF_STMT(BPF_RET | BPF_A, 0)
+               },
+               SKB,
+               { 10, 20, 30 },
+               { { 10, ETH_P_IP }, { 100, ETH_P_IP } },
+       },
+       {
+               "LD_VLAN_TAG",
+               .insns = {
+                       BPF_STMT(BPF_LD | BPF_W | BPF_ABS,
+                                SKF_AD_OFF + SKF_AD_VLAN_TAG),
+                       BPF_STMT(BPF_RET | BPF_A, 0)
+               },
+               SKB,
+               { },
+               {
+                       { 1, SKB_VLAN_TCI & ~VLAN_TAG_PRESENT },
+                       { 10, SKB_VLAN_TCI & ~VLAN_TAG_PRESENT }
+               },
+       },
+       {
+               "LD_VLAN_TAG_PRESENT",
+               .insns = {
+                       BPF_STMT(BPF_LD | BPF_W | BPF_ABS,
+                                SKF_AD_OFF + SKF_AD_VLAN_TAG_PRESENT),
+                       BPF_STMT(BPF_RET | BPF_A, 0)
+               },
+               SKB,
+               { },
+               {
+                       { 1, !!(SKB_VLAN_TCI & VLAN_TAG_PRESENT) },
+                       { 10, !!(SKB_VLAN_TCI & VLAN_TAG_PRESENT) }
+               },
+       },
+       {
+               "LD_IFINDEX",
+               .insns = {
+                       BPF_STMT(BPF_LD | BPF_W | BPF_ABS,
+                                SKF_AD_OFF + SKF_AD_IFINDEX),
+                       BPF_STMT(BPF_RET | BPF_A, 0)
+               },
+               SKB,
+               { },
+               { { 1, SKB_DEV_IFINDEX }, { 10, SKB_DEV_IFINDEX } },
+       },
+       {
+               "LD_HATYPE",
+               .insns = {
+                       BPF_STMT(BPF_LD | BPF_W | BPF_ABS,
+                                SKF_AD_OFF + SKF_AD_HATYPE),
+                       BPF_STMT(BPF_RET | BPF_A, 0)
+               },
+               SKB,
+               { },
+               { { 1, SKB_DEV_TYPE }, { 10, SKB_DEV_TYPE } },
+       },
+       {
+               "LD_CPU",
+               .insns = {
+                       BPF_STMT(BPF_LD | BPF_W | BPF_ABS,
+                                SKF_AD_OFF + SKF_AD_CPU),
+                       BPF_STMT(BPF_MISC | BPF_TAX, 0),
+                       BPF_STMT(BPF_LD | BPF_W | BPF_ABS,
+                                SKF_AD_OFF + SKF_AD_CPU),
+                       BPF_STMT(BPF_ALU | BPF_SUB | BPF_X, 0),
+                       BPF_STMT(BPF_RET | BPF_A, 0)
+               },
+               SKB,
+               { },
+               { { 1, 0 }, { 10, 0 } },
+       },
+       {
+               "LD_NLATTR",
+               .insns = {
+                       BPF_STMT(BPF_LDX | BPF_IMM, 1),
+                       BPF_STMT(BPF_MISC | BPF_TXA, 0),
+                       BPF_STMT(BPF_LDX | BPF_IMM, 3),
+                       BPF_STMT(BPF_LD | BPF_W | BPF_ABS,
+                                SKF_AD_OFF + SKF_AD_NLATTR),
+                       BPF_STMT(BPF_RET | BPF_A, 0)
+               },
+               SKB,
+               { 0xff, 4, 0, 2, 0, 4, 0, 3, 0 },
+               { { 4, 0 }, { 20, 5 } },
+       },
+       {
+               "LD_NLATTR_NEST",
+               .insns = {
+                       BPF_STMT(BPF_LD | BPF_IMM, 1),
+                       BPF_STMT(BPF_LDX | BPF_IMM, 3),
+                       BPF_STMT(BPF_LD | BPF_W | BPF_ABS,
+                                SKF_AD_OFF + SKF_AD_NLATTR_NEST),
+                       BPF_STMT(BPF_LD | BPF_IMM, 1),
+                       BPF_STMT(BPF_LD | BPF_W | BPF_ABS,
+                                SKF_AD_OFF + SKF_AD_NLATTR_NEST),
+                       BPF_STMT(BPF_LD | BPF_IMM, 1),
+                       BPF_STMT(BPF_LD | BPF_W | BPF_ABS,
+                                SKF_AD_OFF + SKF_AD_NLATTR_NEST),
+                       BPF_STMT(BPF_LD | BPF_IMM, 1),
+                       BPF_STMT(BPF_LD | BPF_W | BPF_ABS,
+                                SKF_AD_OFF + SKF_AD_NLATTR_NEST),
+                       BPF_STMT(BPF_LD | BPF_IMM, 1),
+                       BPF_STMT(BPF_LD | BPF_W | BPF_ABS,
+                                SKF_AD_OFF + SKF_AD_NLATTR_NEST),
+                       BPF_STMT(BPF_LD | BPF_IMM, 1),
+                       BPF_STMT(BPF_LD | BPF_W | BPF_ABS,
+                                SKF_AD_OFF + SKF_AD_NLATTR_NEST),
+                       BPF_STMT(BPF_LD | BPF_IMM, 1),
+                       BPF_STMT(BPF_LD | BPF_W | BPF_ABS,
+                                SKF_AD_OFF + SKF_AD_NLATTR_NEST),
+                       BPF_STMT(BPF_LD | BPF_IMM, 1),
+                       BPF_STMT(BPF_LD | BPF_W | BPF_ABS,
+                                SKF_AD_OFF + SKF_AD_NLATTR_NEST),
+                       BPF_STMT(BPF_RET | BPF_A, 0)
+               },
+               SKB,
+               { 0xff, 12, 0, 1, 0, 4, 0, 2, 0, 4, 0, 3, 0 },
+               { { 4, 0 }, { 20, 9 } },
+       },
+       {
+               "LD_PAYLOAD_OFF",
+               .insns = {
+                       BPF_STMT(BPF_LD | BPF_W | BPF_ABS,
+                                SKF_AD_OFF + SKF_AD_PAY_OFFSET),
+                       BPF_STMT(BPF_LD | BPF_W | BPF_ABS,
+                                SKF_AD_OFF + SKF_AD_PAY_OFFSET),
+                       BPF_STMT(BPF_LD | BPF_W | BPF_ABS,
+                                SKF_AD_OFF + SKF_AD_PAY_OFFSET),
+                       BPF_STMT(BPF_LD | BPF_W | BPF_ABS,
+                                SKF_AD_OFF + SKF_AD_PAY_OFFSET),
+                       BPF_STMT(BPF_LD | BPF_W | BPF_ABS,
+                                SKF_AD_OFF + SKF_AD_PAY_OFFSET),
+                       BPF_STMT(BPF_RET | BPF_A, 0)
+               },
+               SKB,
+               /* 00:00:00:00:00:00 > 00:00:00:00:00:00, ethtype IPv4 (0x0800),
+                * length 98: 127.0.0.1 > 127.0.0.1: ICMP echo request,
+                * id 9737, seq 1, length 64
+                */
+               { 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+                 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+                 0x08, 0x00,
+                 0x45, 0x00, 0x00, 0x54, 0xac, 0x8b, 0x40, 0x00, 0x40,
+                 0x01, 0x90, 0x1b, 0x7f, 0x00, 0x00, 0x01 },
+               { { 30, 0 }, { 100, 42 } },
+       },
+       {
+               "LD_ANC_XOR",
+               .insns = {
+                       BPF_STMT(BPF_LD | BPF_IMM, 10),
+                       BPF_STMT(BPF_LDX | BPF_IMM, 300),
+                       BPF_STMT(BPF_LD | BPF_W | BPF_ABS,
+                                SKF_AD_OFF + SKF_AD_ALU_XOR_X),
+                       BPF_STMT(BPF_RET | BPF_A, 0)
+               },
+               SKB,
+               { },
+               { { 4, 10 ^ 300 }, { 20, 10 ^ 300 } },
+       },
+       {
+               "SPILL_FILL",
+               .insns = {
+                       BPF_STMT(BPF_LDX | BPF_LEN, 0),
+                       BPF_STMT(BPF_LD | BPF_IMM, 2),
+                       BPF_STMT(BPF_ALU | BPF_RSH, 1),
+                       BPF_STMT(BPF_ALU | BPF_XOR | BPF_X, 0),
+                       BPF_STMT(BPF_ST, 1), /* M1 = 1 ^ len */
+                       BPF_STMT(BPF_ALU | BPF_XOR | BPF_K, 0x80000000),
+                       BPF_STMT(BPF_ST, 2), /* M2 = 1 ^ len ^ 0x80000000 */
+                       BPF_STMT(BPF_STX, 15), /* M3 = len */
+                       BPF_STMT(BPF_LDX | BPF_MEM, 1),
+                       BPF_STMT(BPF_LD | BPF_MEM, 2),
+                       BPF_STMT(BPF_ALU | BPF_XOR | BPF_X, 0),
+                       BPF_STMT(BPF_LDX | BPF_MEM, 15),
+                       BPF_STMT(BPF_ALU | BPF_XOR | BPF_X, 0),
+                       BPF_STMT(BPF_RET | BPF_A, 0)
+               },
+               SKB,
+               { },
+               { { 1, 0x80000001 }, { 2, 0x80000002 }, { 60, 0x80000000 ^ 60 } }
+       },
+       {
+               "JEQ",
+               .insns = {
+                       BPF_STMT(BPF_LDX | BPF_LEN, 0),
+                       BPF_STMT(BPF_LD | BPF_B | BPF_ABS, 2),
+                       BPF_JUMP(BPF_JMP | BPF_JEQ | BPF_X, 0, 0, 1),
+                       BPF_STMT(BPF_RET | BPF_K, 1),
+                       BPF_STMT(BPF_RET | BPF_K, MAX_K)
+               },
+               SKB,
+               { 3, 3, 3, 3, 3 },
+               { { 1, 0 }, { 3, 1 }, { 4, MAX_K } },
+       },
+       {
+               "JGT",
+               .insns = {
+                       BPF_STMT(BPF_LDX | BPF_LEN, 0),
+                       BPF_STMT(BPF_LD | BPF_B | BPF_ABS, 2),
+                       BPF_JUMP(BPF_JMP | BPF_JGT | BPF_X, 0, 0, 1),
+                       BPF_STMT(BPF_RET | BPF_K, 1),
+                       BPF_STMT(BPF_RET | BPF_K, MAX_K)
+               },
+               SKB,
+               { 4, 4, 4, 3, 3 },
+               { { 2, 0 }, { 3, 1 }, { 4, MAX_K } },
+       },
+       {
+               "JGE",
+               .insns = {
+                       BPF_STMT(BPF_LDX | BPF_LEN, 0),
+                       BPF_STMT(BPF_LD | BPF_B | BPF_IND, MAX_K),
+                       BPF_JUMP(BPF_JMP | BPF_JGE | BPF_K, 1, 1, 0),
+                       BPF_STMT(BPF_RET | BPF_K, 10),
+                       BPF_JUMP(BPF_JMP | BPF_JGE | BPF_K, 2, 1, 0),
+                       BPF_STMT(BPF_RET | BPF_K, 20),
+                       BPF_JUMP(BPF_JMP | BPF_JGE | BPF_K, 3, 1, 0),
+                       BPF_STMT(BPF_RET | BPF_K, 30),
+                       BPF_JUMP(BPF_JMP | BPF_JGE | BPF_K, 4, 1, 0),
+                       BPF_STMT(BPF_RET | BPF_K, 40),
+                       BPF_STMT(BPF_RET | BPF_K, MAX_K)
+               },
+               SKB,
+               { 1, 2, 3, 4, 5 },
+               { { 1, 20 }, { 3, 40 }, { 5, MAX_K } },
+       },
+       {
+               "JSET",
+               .insns = {
+                       BPF_JUMP(BPF_JMP | BPF_JA, 0, 0, 0),
+                       BPF_JUMP(BPF_JMP | BPF_JA, 1, 1, 1),
+                       BPF_JUMP(BPF_JMP | BPF_JA, 0, 0, 0),
+                       BPF_JUMP(BPF_JMP | BPF_JA, 0, 0, 0),
+                       BPF_STMT(BPF_LDX | BPF_LEN, 0),
+                       BPF_STMT(BPF_MISC | BPF_TXA, 0),
+                       BPF_STMT(BPF_ALU | BPF_SUB | BPF_K, 4),
+                       BPF_STMT(BPF_MISC | BPF_TAX, 0),
+                       BPF_STMT(BPF_LD | BPF_W | BPF_IND, 0),
+                       BPF_JUMP(BPF_JMP | BPF_JSET | BPF_K, 1, 0, 1),
+                       BPF_STMT(BPF_RET | BPF_K, 10),
+                       BPF_JUMP(BPF_JMP | BPF_JSET | BPF_K, 0x80000000, 0, 1),
+                       BPF_STMT(BPF_RET | BPF_K, 20),
+                       BPF_JUMP(BPF_JMP | BPF_JSET | BPF_K, 0xffffff, 1, 0),
+                       BPF_STMT(BPF_RET | BPF_K, 30),
+                       BPF_JUMP(BPF_JMP | BPF_JSET | BPF_K, 0xffffff, 1, 0),
+                       BPF_STMT(BPF_RET | BPF_K, 30),
+                       BPF_JUMP(BPF_JMP | BPF_JSET | BPF_K, 0xffffff, 1, 0),
+                       BPF_STMT(BPF_RET | BPF_K, 30),
+                       BPF_JUMP(BPF_JMP | BPF_JSET | BPF_K, 0xffffff, 1, 0),
+                       BPF_STMT(BPF_RET | BPF_K, 30),
+                       BPF_JUMP(BPF_JMP | BPF_JSET | BPF_K, 0xffffff, 1, 0),
+                       BPF_STMT(BPF_RET | BPF_K, 30),
+                       BPF_STMT(BPF_RET | BPF_K, MAX_K)
+               },
+               SKB,
+               { 0, 0xAA, 0x55, 1 },
+               { { 4, 10 }, { 5, 20 }, { 6, MAX_K } },
+       },
+       {
+               "tcpdump port 22",
+               .insns = {
+                       { 0x28,  0,  0, 0x0000000c },
+                       { 0x15,  0,  8, 0x000086dd },
+                       { 0x30,  0,  0, 0x00000014 },
+                       { 0x15,  2,  0, 0x00000084 },
+                       { 0x15,  1,  0, 0x00000006 },
+                       { 0x15,  0, 17, 0x00000011 },
+                       { 0x28,  0,  0, 0x00000036 },
+                       { 0x15, 14,  0, 0x00000016 },
+                       { 0x28,  0,  0, 0x00000038 },
+                       { 0x15, 12, 13, 0x00000016 },
+                       { 0x15,  0, 12, 0x00000800 },
+                       { 0x30,  0,  0, 0x00000017 },
+                       { 0x15,  2,  0, 0x00000084 },
+                       { 0x15,  1,  0, 0x00000006 },
+                       { 0x15,  0,  8, 0x00000011 },
+                       { 0x28,  0,  0, 0x00000014 },
+                       { 0x45,  6,  0, 0x00001fff },
+                       { 0xb1,  0,  0, 0x0000000e },
+                       { 0x48,  0,  0, 0x0000000e },
+                       { 0x15,  2,  0, 0x00000016 },
+                       { 0x48,  0,  0, 0x00000010 },
+                       { 0x15,  0,  1, 0x00000016 },
+                       { 0x06,  0,  0, 0x0000ffff },
+                       { 0x06,  0,  0, 0x00000000 },
+               },
+               SKB,
+               /* 3c:07:54:43:e5:76 > 10:bf:48:d6:43:d6, ethertype IPv4(0x0800)
+                * length 114: 10.1.1.149.49700 > 10.1.2.10.22: Flags [P.],
+                * seq 1305692979:1305693027, ack 3650467037, win 65535,
+                * options [nop,nop,TS val 2502645400 ecr 3971138], length 48
+                */
+               { 0x10, 0xbf, 0x48, 0xd6, 0x43, 0xd6,
+                 0x3c, 0x07, 0x54, 0x43, 0xe5, 0x76,
+                 0x08, 0x00,
+                 0x45, 0x10, 0x00, 0x64, 0x75, 0xb5,
+                 0x40, 0x00, 0x40, 0x06, 0xad, 0x2e, /* IP header */
+                 0x0a, 0x01, 0x01, 0x95, /* ip src */
+                 0x0a, 0x01, 0x02, 0x0a, /* ip dst */
+                 0xc2, 0x24,
+                 0x00, 0x16 /* dst port */ },
+               { { 10, 0 }, { 30, 0 }, { 100, 65535 } },
+       },
+       {
+               "tcpdump complex",
+               .insns = {
+                       /* tcpdump -nei eth0 'tcp port 22 and (((ip[2:2] -
+                        * ((ip[0]&0xf)<<2)) - ((tcp[12]&0xf0)>>2)) != 0) and
+                        * (len > 115 or len < 30000000000)' -d
+                        */
+                       { 0x28,  0,  0, 0x0000000c },
+                       { 0x15, 30,  0, 0x000086dd },
+                       { 0x15,  0, 29, 0x00000800 },
+                       { 0x30,  0,  0, 0x00000017 },
+                       { 0x15,  0, 27, 0x00000006 },
+                       { 0x28,  0,  0, 0x00000014 },
+                       { 0x45, 25,  0, 0x00001fff },
+                       { 0xb1,  0,  0, 0x0000000e },
+                       { 0x48,  0,  0, 0x0000000e },
+                       { 0x15,  2,  0, 0x00000016 },
+                       { 0x48,  0,  0, 0x00000010 },
+                       { 0x15,  0, 20, 0x00000016 },
+                       { 0x28,  0,  0, 0x00000010 },
+                       { 0x02,  0,  0, 0x00000001 },
+                       { 0x30,  0,  0, 0x0000000e },
+                       { 0x54,  0,  0, 0x0000000f },
+                       { 0x64,  0,  0, 0x00000002 },
+                       { 0x07,  0,  0, 0x00000005 },
+                       { 0x60,  0,  0, 0x00000001 },
+                       { 0x1c,  0,  0, 0x00000000 },
+                       { 0x02,  0,  0, 0x00000005 },
+                       { 0xb1,  0,  0, 0x0000000e },
+                       { 0x50,  0,  0, 0x0000001a },
+                       { 0x54,  0,  0, 0x000000f0 },
+                       { 0x74,  0,  0, 0x00000002 },
+                       { 0x07,  0,  0, 0x00000009 },
+                       { 0x60,  0,  0, 0x00000005 },
+                       { 0x1d,  4,  0, 0x00000000 },
+                       { 0x80,  0,  0, 0x00000000 },
+                       { 0x25,  1,  0, 0x00000073 },
+                       { 0x35,  1,  0, 0xfc23ac00 },
+                       { 0x06,  0,  0, 0x0000ffff },
+                       { 0x06,  0,  0, 0x00000000 },
+               },
+               SKB,
+               { 0x10, 0xbf, 0x48, 0xd6, 0x43, 0xd6,
+                 0x3c, 0x07, 0x54, 0x43, 0xe5, 0x76,
+                 0x08, 0x00,
+                 0x45, 0x10, 0x00, 0x64, 0x75, 0xb5,
+                 0x40, 0x00, 0x40, 0x06, 0xad, 0x2e, /* IP header */
+                 0x0a, 0x01, 0x01, 0x95, /* ip src */
+                 0x0a, 0x01, 0x02, 0x0a, /* ip dst */
+                 0xc2, 0x24,
+                 0x00, 0x16 /* dst port */ },
+               { { 10, 0 }, { 30, 0 }, { 100, 65535 } },
+       },
+       {
+               "RET_A",
+               .insns = {
+                       /* check that unitialized X and A contain zeros */
+                       BPF_STMT(BPF_MISC | BPF_TXA, 0),
+                       BPF_STMT(BPF_RET | BPF_A, 0)
+               },
+               SKB,
+               {},
+               { {1, 0}, {2, 0} },
+       },
+       {
+               "INT: ADD trivial",
+               .insns_int = {
+                       BPF_ALU64_IMM(BPF_MOV, R1, 1),
+                       BPF_ALU64_IMM(BPF_ADD, R1, 2),
+                       BPF_ALU64_IMM(BPF_MOV, R2, 3),
+                       BPF_ALU64_REG(BPF_SUB, R1, R2),
+                       BPF_ALU64_IMM(BPF_ADD, R1, -1),
+                       BPF_ALU64_IMM(BPF_MUL, R1, 3),
+                       BPF_ALU64_REG(BPF_MOV, R0, R1),
+                       BPF_EXIT_INSN(),
+               },
+               SKB_INT,
+               { },
+               { { 0, 0xfffffffd } }
+       },
+       {
+               "INT: MUL_X",
+               .insns_int = {
+                       BPF_ALU64_IMM(BPF_MOV, R0, -1),
+                       BPF_ALU64_IMM(BPF_MOV, R1, -1),
+                       BPF_ALU64_IMM(BPF_MOV, R2, 3),
+                       BPF_ALU64_REG(BPF_MUL, R1, R2),
+                       BPF_JMP_IMM(BPF_JEQ, R1, 0xfffffffd, 1),
+                       BPF_EXIT_INSN(),
+                       BPF_ALU64_IMM(BPF_MOV, R0, 1),
+                       BPF_EXIT_INSN(),
+               },
+               SKB_INT,
+               { },
+               { { 0, 1 } }
+       },
+       {
+               "INT: MUL_X2",
+               .insns_int = {
+                       BPF_ALU32_IMM(BPF_MOV, R0, -1),
+                       BPF_ALU32_IMM(BPF_MOV, R1, -1),
+                       BPF_ALU32_IMM(BPF_MOV, R2, 3),
+                       BPF_ALU64_REG(BPF_MUL, R1, R2),
+                       BPF_ALU64_IMM(BPF_RSH, R1, 8),
+                       BPF_JMP_IMM(BPF_JEQ, R1, 0x2ffffff, 1),
+                       BPF_EXIT_INSN(),
+                       BPF_ALU32_IMM(BPF_MOV, R0, 1),
+                       BPF_EXIT_INSN(),
+               },
+               SKB_INT,
+               { },
+               { { 0, 1 } }
+       },
+       {
+               "INT: MUL32_X",
+               .insns_int = {
+                       BPF_ALU32_IMM(BPF_MOV, R0, -1),
+                       BPF_ALU64_IMM(BPF_MOV, R1, -1),
+                       BPF_ALU32_IMM(BPF_MOV, R2, 3),
+                       BPF_ALU32_REG(BPF_MUL, R1, R2),
+                       BPF_ALU64_IMM(BPF_RSH, R1, 8),
+                       BPF_JMP_IMM(BPF_JEQ, R1, 0xffffff, 1),
+                       BPF_EXIT_INSN(),
+                       BPF_ALU32_IMM(BPF_MOV, R0, 1),
+                       BPF_EXIT_INSN(),
+               },
+               SKB_INT,
+               { },
+               { { 0, 1 } }
+       },
+       {
+               /* Have to test all register combinations, since
+                * JITing of different registers will produce
+                * different asm code.
+                */
+               "INT: ADD 64-bit",
+               .insns_int = {
+                       BPF_ALU64_IMM(BPF_MOV, R0, 0),
+                       BPF_ALU64_IMM(BPF_MOV, R1, 1),
+                       BPF_ALU64_IMM(BPF_MOV, R2, 2),
+                       BPF_ALU64_IMM(BPF_MOV, R3, 3),
+                       BPF_ALU64_IMM(BPF_MOV, R4, 4),
+                       BPF_ALU64_IMM(BPF_MOV, R5, 5),
+                       BPF_ALU64_IMM(BPF_MOV, R6, 6),
+                       BPF_ALU64_IMM(BPF_MOV, R7, 7),
+                       BPF_ALU64_IMM(BPF_MOV, R8, 8),
+                       BPF_ALU64_IMM(BPF_MOV, R9, 9),
+                       BPF_ALU64_IMM(BPF_ADD, R0, 20),
+                       BPF_ALU64_IMM(BPF_ADD, R1, 20),
+                       BPF_ALU64_IMM(BPF_ADD, R2, 20),
+                       BPF_ALU64_IMM(BPF_ADD, R3, 20),
+                       BPF_ALU64_IMM(BPF_ADD, R4, 20),
+                       BPF_ALU64_IMM(BPF_ADD, R5, 20),
+                       BPF_ALU64_IMM(BPF_ADD, R6, 20),
+                       BPF_ALU64_IMM(BPF_ADD, R7, 20),
+                       BPF_ALU64_IMM(BPF_ADD, R8, 20),
+                       BPF_ALU64_IMM(BPF_ADD, R9, 20),
+                       BPF_ALU64_IMM(BPF_SUB, R0, 10),
+                       BPF_ALU64_IMM(BPF_SUB, R1, 10),
+                       BPF_ALU64_IMM(BPF_SUB, R2, 10),
+                       BPF_ALU64_IMM(BPF_SUB, R3, 10),
+                       BPF_ALU64_IMM(BPF_SUB, R4, 10),
+                       BPF_ALU64_IMM(BPF_SUB, R5, 10),
+                       BPF_ALU64_IMM(BPF_SUB, R6, 10),
+                       BPF_ALU64_IMM(BPF_SUB, R7, 10),
+                       BPF_ALU64_IMM(BPF_SUB, R8, 10),
+                       BPF_ALU64_IMM(BPF_SUB, R9, 10),
+                       BPF_ALU64_REG(BPF_ADD, R0, R0),
+                       BPF_ALU64_REG(BPF_ADD, R0, R1),
+                       BPF_ALU64_REG(BPF_ADD, R0, R2),
+                       BPF_ALU64_REG(BPF_ADD, R0, R3),
+                       BPF_ALU64_REG(BPF_ADD, R0, R4),
+                       BPF_ALU64_REG(BPF_ADD, R0, R5),
+                       BPF_ALU64_REG(BPF_ADD, R0, R6),
+                       BPF_ALU64_REG(BPF_ADD, R0, R7),
+                       BPF_ALU64_REG(BPF_ADD, R0, R8),
+                       BPF_ALU64_REG(BPF_ADD, R0, R9), /* R0 == 155 */
+                       BPF_JMP_IMM(BPF_JEQ, R0, 155, 1),
+                       BPF_EXIT_INSN(),
+                       BPF_ALU64_REG(BPF_ADD, R1, R0),
+                       BPF_ALU64_REG(BPF_ADD, R1, R1),
+                       BPF_ALU64_REG(BPF_ADD, R1, R2),
+                       BPF_ALU64_REG(BPF_ADD, R1, R3),
+                       BPF_ALU64_REG(BPF_ADD, R1, R4),
+                       BPF_ALU64_REG(BPF_ADD, R1, R5),
+                       BPF_ALU64_REG(BPF_ADD, R1, R6),
+                       BPF_ALU64_REG(BPF_ADD, R1, R7),
+                       BPF_ALU64_REG(BPF_ADD, R1, R8),
+                       BPF_ALU64_REG(BPF_ADD, R1, R9), /* R1 == 456 */
+                       BPF_JMP_IMM(BPF_JEQ, R1, 456, 1),
+                       BPF_EXIT_INSN(),
+                       BPF_ALU64_REG(BPF_ADD, R2, R0),
+                       BPF_ALU64_REG(BPF_ADD, R2, R1),
+                       BPF_ALU64_REG(BPF_ADD, R2, R2),
+                       BPF_ALU64_REG(BPF_ADD, R2, R3),
+                       BPF_ALU64_REG(BPF_ADD, R2, R4),
+                       BPF_ALU64_REG(BPF_ADD, R2, R5),
+                       BPF_ALU64_REG(BPF_ADD, R2, R6),
+                       BPF_ALU64_REG(BPF_ADD, R2, R7),
+                       BPF_ALU64_REG(BPF_ADD, R2, R8),
+                       BPF_ALU64_REG(BPF_ADD, R2, R9), /* R2 == 1358 */
+                       BPF_JMP_IMM(BPF_JEQ, R2, 1358, 1),
+                       BPF_EXIT_INSN(),
+                       BPF_ALU64_REG(BPF_ADD, R3, R0),
+                       BPF_ALU64_REG(BPF_ADD, R3, R1),
+                       BPF_ALU64_REG(BPF_ADD, R3, R2),
+                       BPF_ALU64_REG(BPF_ADD, R3, R3),
+                       BPF_ALU64_REG(BPF_ADD, R3, R4),
+                       BPF_ALU64_REG(BPF_ADD, R3, R5),
+                       BPF_ALU64_REG(BPF_ADD, R3, R6),
+                       BPF_ALU64_REG(BPF_ADD, R3, R7),
+                       BPF_ALU64_REG(BPF_ADD, R3, R8),
+                       BPF_ALU64_REG(BPF_ADD, R3, R9), /* R3 == 4063 */
+                       BPF_JMP_IMM(BPF_JEQ, R3, 4063, 1),
+                       BPF_EXIT_INSN(),
+                       BPF_ALU64_REG(BPF_ADD, R4, R0),
+                       BPF_ALU64_REG(BPF_ADD, R4, R1),
+                       BPF_ALU64_REG(BPF_ADD, R4, R2),
+                       BPF_ALU64_REG(BPF_ADD, R4, R3),
+                       BPF_ALU64_REG(BPF_ADD, R4, R4),
+                       BPF_ALU64_REG(BPF_ADD, R4, R5),
+                       BPF_ALU64_REG(BPF_ADD, R4, R6),
+                       BPF_ALU64_REG(BPF_ADD, R4, R7),
+                       BPF_ALU64_REG(BPF_ADD, R4, R8),
+                       BPF_ALU64_REG(BPF_ADD, R4, R9), /* R4 == 12177 */
+                       BPF_JMP_IMM(BPF_JEQ, R4, 12177, 1),
+                       BPF_EXIT_INSN(),
+                       BPF_ALU64_REG(BPF_ADD, R5, R0),
+                       BPF_ALU64_REG(BPF_ADD, R5, R1),
+                       BPF_ALU64_REG(BPF_ADD, R5, R2),
+                       BPF_ALU64_REG(BPF_ADD, R5, R3),
+                       BPF_ALU64_REG(BPF_ADD, R5, R4),
+                       BPF_ALU64_REG(BPF_ADD, R5, R5),
+                       BPF_ALU64_REG(BPF_ADD, R5, R6),
+                       BPF_ALU64_REG(BPF_ADD, R5, R7),
+                       BPF_ALU64_REG(BPF_ADD, R5, R8),
+                       BPF_ALU64_REG(BPF_ADD, R5, R9), /* R5 == 36518 */
+                       BPF_JMP_IMM(BPF_JEQ, R5, 36518, 1),
+                       BPF_EXIT_INSN(),
+                       BPF_ALU64_REG(BPF_ADD, R6, R0),
+                       BPF_ALU64_REG(BPF_ADD, R6, R1),
+                       BPF_ALU64_REG(BPF_ADD, R6, R2),
+                       BPF_ALU64_REG(BPF_ADD, R6, R3),
+                       BPF_ALU64_REG(BPF_ADD, R6, R4),
+                       BPF_ALU64_REG(BPF_ADD, R6, R5),
+                       BPF_ALU64_REG(BPF_ADD, R6, R6),
+                       BPF_ALU64_REG(BPF_ADD, R6, R7),
+                       BPF_ALU64_REG(BPF_ADD, R6, R8),
+                       BPF_ALU64_REG(BPF_ADD, R6, R9), /* R6 == 109540 */
+                       BPF_JMP_IMM(BPF_JEQ, R6, 109540, 1),
+                       BPF_EXIT_INSN(),
+                       BPF_ALU64_REG(BPF_ADD, R7, R0),
+                       BPF_ALU64_REG(BPF_ADD, R7, R1),
+                       BPF_ALU64_REG(BPF_ADD, R7, R2),
+                       BPF_ALU64_REG(BPF_ADD, R7, R3),
+                       BPF_ALU64_REG(BPF_ADD, R7, R4),
+                       BPF_ALU64_REG(BPF_ADD, R7, R5),
+                       BPF_ALU64_REG(BPF_ADD, R7, R6),
+                       BPF_ALU64_REG(BPF_ADD, R7, R7),
+                       BPF_ALU64_REG(BPF_ADD, R7, R8),
+                       BPF_ALU64_REG(BPF_ADD, R7, R9), /* R7 == 328605 */
+                       BPF_JMP_IMM(BPF_JEQ, R7, 328605, 1),
+                       BPF_EXIT_INSN(),
+                       BPF_ALU64_REG(BPF_ADD, R8, R0),
+                       BPF_ALU64_REG(BPF_ADD, R8, R1),
+                       BPF_ALU64_REG(BPF_ADD, R8, R2),
+                       BPF_ALU64_REG(BPF_ADD, R8, R3),
+                       BPF_ALU64_REG(BPF_ADD, R8, R4),
+                       BPF_ALU64_REG(BPF_ADD, R8, R5),
+                       BPF_ALU64_REG(BPF_ADD, R8, R6),
+                       BPF_ALU64_REG(BPF_ADD, R8, R7),
+                       BPF_ALU64_REG(BPF_ADD, R8, R8),
+                       BPF_ALU64_REG(BPF_ADD, R8, R9), /* R8 == 985799 */
+                       BPF_JMP_IMM(BPF_JEQ, R8, 985799, 1),
+                       BPF_EXIT_INSN(),
+                       BPF_ALU64_REG(BPF_ADD, R9, R0),
+                       BPF_ALU64_REG(BPF_ADD, R9, R1),
+                       BPF_ALU64_REG(BPF_ADD, R9, R2),
+                       BPF_ALU64_REG(BPF_ADD, R9, R3),
+                       BPF_ALU64_REG(BPF_ADD, R9, R4),
+                       BPF_ALU64_REG(BPF_ADD, R9, R5),
+                       BPF_ALU64_REG(BPF_ADD, R9, R6),
+                       BPF_ALU64_REG(BPF_ADD, R9, R7),
+                       BPF_ALU64_REG(BPF_ADD, R9, R8),
+                       BPF_ALU64_REG(BPF_ADD, R9, R9), /* R9 == 2957380 */
+                       BPF_ALU64_REG(BPF_MOV, R0, R9),
+                       BPF_EXIT_INSN(),
+               },
+               SKB_INT,
+               { },
+               { { 0, 2957380 } }
+       },
+       {
+               "INT: ADD 32-bit",
+               .insns_int = {
+                       BPF_ALU32_IMM(BPF_MOV, R0, 20),
+                       BPF_ALU32_IMM(BPF_MOV, R1, 1),
+                       BPF_ALU32_IMM(BPF_MOV, R2, 2),
+                       BPF_ALU32_IMM(BPF_MOV, R3, 3),
+                       BPF_ALU32_IMM(BPF_MOV, R4, 4),
+                       BPF_ALU32_IMM(BPF_MOV, R5, 5),
+                       BPF_ALU32_IMM(BPF_MOV, R6, 6),
+                       BPF_ALU32_IMM(BPF_MOV, R7, 7),
+                       BPF_ALU32_IMM(BPF_MOV, R8, 8),
+                       BPF_ALU32_IMM(BPF_MOV, R9, 9),
+                       BPF_ALU64_IMM(BPF_ADD, R1, 10),
+                       BPF_ALU64_IMM(BPF_ADD, R2, 10),
+                       BPF_ALU64_IMM(BPF_ADD, R3, 10),
+                       BPF_ALU64_IMM(BPF_ADD, R4, 10),
+                       BPF_ALU64_IMM(BPF_ADD, R5, 10),
+                       BPF_ALU64_IMM(BPF_ADD, R6, 10),
+                       BPF_ALU64_IMM(BPF_ADD, R7, 10),
+                       BPF_ALU64_IMM(BPF_ADD, R8, 10),
+                       BPF_ALU64_IMM(BPF_ADD, R9, 10),
+                       BPF_ALU32_REG(BPF_ADD, R0, R1),
+                       BPF_ALU32_REG(BPF_ADD, R0, R2),
+                       BPF_ALU32_REG(BPF_ADD, R0, R3),
+                       BPF_ALU32_REG(BPF_ADD, R0, R4),
+                       BPF_ALU32_REG(BPF_ADD, R0, R5),
+                       BPF_ALU32_REG(BPF_ADD, R0, R6),
+                       BPF_ALU32_REG(BPF_ADD, R0, R7),
+                       BPF_ALU32_REG(BPF_ADD, R0, R8),
+                       BPF_ALU32_REG(BPF_ADD, R0, R9), /* R0 == 155 */
+                       BPF_JMP_IMM(BPF_JEQ, R0, 155, 1),
+                       BPF_EXIT_INSN(),
+                       BPF_ALU32_REG(BPF_ADD, R1, R0),
+                       BPF_ALU32_REG(BPF_ADD, R1, R1),
+                       BPF_ALU32_REG(BPF_ADD, R1, R2),
+                       BPF_ALU32_REG(BPF_ADD, R1, R3),
+                       BPF_ALU32_REG(BPF_ADD, R1, R4),
+                       BPF_ALU32_REG(BPF_ADD, R1, R5),
+                       BPF_ALU32_REG(BPF_ADD, R1, R6),
+                       BPF_ALU32_REG(BPF_ADD, R1, R7),
+                       BPF_ALU32_REG(BPF_ADD, R1, R8),
+                       BPF_ALU32_REG(BPF_ADD, R1, R9), /* R1 == 456 */
+                       BPF_JMP_IMM(BPF_JEQ, R1, 456, 1),
+                       BPF_EXIT_INSN(),
+                       BPF_ALU32_REG(BPF_ADD, R2, R0),
+                       BPF_ALU32_REG(BPF_ADD, R2, R1),
+                       BPF_ALU32_REG(BPF_ADD, R2, R2),
+                       BPF_ALU32_REG(BPF_ADD, R2, R3),
+                       BPF_ALU32_REG(BPF_ADD, R2, R4),
+                       BPF_ALU32_REG(BPF_ADD, R2, R5),
+                       BPF_ALU32_REG(BPF_ADD, R2, R6),
+                       BPF_ALU32_REG(BPF_ADD, R2, R7),
+                       BPF_ALU32_REG(BPF_ADD, R2, R8),
+                       BPF_ALU32_REG(BPF_ADD, R2, R9), /* R2 == 1358 */
+                       BPF_JMP_IMM(BPF_JEQ, R2, 1358, 1),
+                       BPF_EXIT_INSN(),
+                       BPF_ALU32_REG(BPF_ADD, R3, R0),
+                       BPF_ALU32_REG(BPF_ADD, R3, R1),
+                       BPF_ALU32_REG(BPF_ADD, R3, R2),
+                       BPF_ALU32_REG(BPF_ADD, R3, R3),
+                       BPF_ALU32_REG(BPF_ADD, R3, R4),
+                       BPF_ALU32_REG(BPF_ADD, R3, R5),
+                       BPF_ALU32_REG(BPF_ADD, R3, R6),
+                       BPF_ALU32_REG(BPF_ADD, R3, R7),
+                       BPF_ALU32_REG(BPF_ADD, R3, R8),
+                       BPF_ALU32_REG(BPF_ADD, R3, R9), /* R3 == 4063 */
+                       BPF_JMP_IMM(BPF_JEQ, R3, 4063, 1),
+                       BPF_EXIT_INSN(),
+                       BPF_ALU32_REG(BPF_ADD, R4, R0),
+                       BPF_ALU32_REG(BPF_ADD, R4, R1),
+                       BPF_ALU32_REG(BPF_ADD, R4, R2),
+                       BPF_ALU32_REG(BPF_ADD, R4, R3),
+                       BPF_ALU32_REG(BPF_ADD, R4, R4),
+                       BPF_ALU32_REG(BPF_ADD, R4, R5),
+                       BPF_ALU32_REG(BPF_ADD, R4, R6),
+                       BPF_ALU32_REG(BPF_ADD, R4, R7),
+                       BPF_ALU32_REG(BPF_ADD, R4, R8),
+                       BPF_ALU32_REG(BPF_ADD, R4, R9), /* R4 == 12177 */
+                       BPF_JMP_IMM(BPF_JEQ, R4, 12177, 1),
+                       BPF_EXIT_INSN(),
+                       BPF_ALU32_REG(BPF_ADD, R5, R0),
+                       BPF_ALU32_REG(BPF_ADD, R5, R1),
+                       BPF_ALU32_REG(BPF_ADD, R5, R2),
+                       BPF_ALU32_REG(BPF_ADD, R5, R3),
+                       BPF_ALU32_REG(BPF_ADD, R5, R4),
+                       BPF_ALU32_REG(BPF_ADD, R5, R5),
+                       BPF_ALU32_REG(BPF_ADD, R5, R6),
+                       BPF_ALU32_REG(BPF_ADD, R5, R7),
+                       BPF_ALU32_REG(BPF_ADD, R5, R8),
+                       BPF_ALU32_REG(BPF_ADD, R5, R9), /* R5 == 36518 */
+                       BPF_JMP_IMM(BPF_JEQ, R5, 36518, 1),
+                       BPF_EXIT_INSN(),
+                       BPF_ALU32_REG(BPF_ADD, R6, R0),
+                       BPF_ALU32_REG(BPF_ADD, R6, R1),
+                       BPF_ALU32_REG(BPF_ADD, R6, R2),
+                       BPF_ALU32_REG(BPF_ADD, R6, R3),
+                       BPF_ALU32_REG(BPF_ADD, R6, R4),
+                       BPF_ALU32_REG(BPF_ADD, R6, R5),
+                       BPF_ALU32_REG(BPF_ADD, R6, R6),
+                       BPF_ALU32_REG(BPF_ADD, R6, R7),
+                       BPF_ALU32_REG(BPF_ADD, R6, R8),
+                       BPF_ALU32_REG(BPF_ADD, R6, R9), /* R6 == 109540 */
+                       BPF_JMP_IMM(BPF_JEQ, R6, 109540, 1),
+                       BPF_EXIT_INSN(),
+                       BPF_ALU32_REG(BPF_ADD, R7, R0),
+                       BPF_ALU32_REG(BPF_ADD, R7, R1),
+                       BPF_ALU32_REG(BPF_ADD, R7, R2),
+                       BPF_ALU32_REG(BPF_ADD, R7, R3),
+                       BPF_ALU32_REG(BPF_ADD, R7, R4),
+                       BPF_ALU32_REG(BPF_ADD, R7, R5),
+                       BPF_ALU32_REG(BPF_ADD, R7, R6),
+                       BPF_ALU32_REG(BPF_ADD, R7, R7),
+                       BPF_ALU32_REG(BPF_ADD, R7, R8),
+                       BPF_ALU32_REG(BPF_ADD, R7, R9), /* R7 == 328605 */
+                       BPF_JMP_IMM(BPF_JEQ, R7, 328605, 1),
+                       BPF_EXIT_INSN(),
+                       BPF_ALU32_REG(BPF_ADD, R8, R0),
+                       BPF_ALU32_REG(BPF_ADD, R8, R1),
+                       BPF_ALU32_REG(BPF_ADD, R8, R2),
+                       BPF_ALU32_REG(BPF_ADD, R8, R3),
+                       BPF_ALU32_REG(BPF_ADD, R8, R4),
+                       BPF_ALU32_REG(BPF_ADD, R8, R5),
+                       BPF_ALU32_REG(BPF_ADD, R8, R6),
+                       BPF_ALU32_REG(BPF_ADD, R8, R7),
+                       BPF_ALU32_REG(BPF_ADD, R8, R8),
+                       BPF_ALU32_REG(BPF_ADD, R8, R9), /* R8 == 985799 */
+                       BPF_JMP_IMM(BPF_JEQ, R8, 985799, 1),
+                       BPF_EXIT_INSN(),
+                       BPF_ALU32_REG(BPF_ADD, R9, R0),
+                       BPF_ALU32_REG(BPF_ADD, R9, R1),
+                       BPF_ALU32_REG(BPF_ADD, R9, R2),
+                       BPF_ALU32_REG(BPF_ADD, R9, R3),
+                       BPF_ALU32_REG(BPF_ADD, R9, R4),
+                       BPF_ALU32_REG(BPF_ADD, R9, R5),
+                       BPF_ALU32_REG(BPF_ADD, R9, R6),
+                       BPF_ALU32_REG(BPF_ADD, R9, R7),
+                       BPF_ALU32_REG(BPF_ADD, R9, R8),
+                       BPF_ALU32_REG(BPF_ADD, R9, R9), /* R9 == 2957380 */
+                       BPF_ALU32_REG(BPF_MOV, R0, R9),
+                       BPF_EXIT_INSN(),
+               },
+               SKB_INT,
+               { },
+               { { 0, 2957380 } }
+       },
+       {       /* Mainly checking JIT here. */
+               "INT: SUB",
+               .insns_int = {
+                       BPF_ALU64_IMM(BPF_MOV, R0, 0),
+                       BPF_ALU64_IMM(BPF_MOV, R1, 1),
+                       BPF_ALU64_IMM(BPF_MOV, R2, 2),
+                       BPF_ALU64_IMM(BPF_MOV, R3, 3),
+                       BPF_ALU64_IMM(BPF_MOV, R4, 4),
+                       BPF_ALU64_IMM(BPF_MOV, R5, 5),
+                       BPF_ALU64_IMM(BPF_MOV, R6, 6),
+                       BPF_ALU64_IMM(BPF_MOV, R7, 7),
+                       BPF_ALU64_IMM(BPF_MOV, R8, 8),
+                       BPF_ALU64_IMM(BPF_MOV, R9, 9),
+                       BPF_ALU64_REG(BPF_SUB, R0, R0),
+                       BPF_ALU64_REG(BPF_SUB, R0, R1),
+                       BPF_ALU64_REG(BPF_SUB, R0, R2),
+                       BPF_ALU64_REG(BPF_SUB, R0, R3),
+                       BPF_ALU64_REG(BPF_SUB, R0, R4),
+                       BPF_ALU64_REG(BPF_SUB, R0, R5),
+                       BPF_ALU64_REG(BPF_SUB, R0, R6),
+                       BPF_ALU64_REG(BPF_SUB, R0, R7),
+                       BPF_ALU64_REG(BPF_SUB, R0, R8),
+                       BPF_ALU64_REG(BPF_SUB, R0, R9),
+                       BPF_ALU64_IMM(BPF_SUB, R0, 10),
+                       BPF_JMP_IMM(BPF_JEQ, R0, -55, 1),
+                       BPF_EXIT_INSN(),
+                       BPF_ALU64_REG(BPF_SUB, R1, R0),
+                       BPF_ALU64_REG(BPF_SUB, R1, R2),
+                       BPF_ALU64_REG(BPF_SUB, R1, R3),
+                       BPF_ALU64_REG(BPF_SUB, R1, R4),
+                       BPF_ALU64_REG(BPF_SUB, R1, R5),
+                       BPF_ALU64_REG(BPF_SUB, R1, R6),
+                       BPF_ALU64_REG(BPF_SUB, R1, R7),
+                       BPF_ALU64_REG(BPF_SUB, R1, R8),
+                       BPF_ALU64_REG(BPF_SUB, R1, R9),
+                       BPF_ALU64_IMM(BPF_SUB, R1, 10),
+                       BPF_ALU64_REG(BPF_SUB, R2, R0),
+                       BPF_ALU64_REG(BPF_SUB, R2, R1),
+                       BPF_ALU64_REG(BPF_SUB, R2, R3),
+                       BPF_ALU64_REG(BPF_SUB, R2, R4),
+                       BPF_ALU64_REG(BPF_SUB, R2, R5),
+                       BPF_ALU64_REG(BPF_SUB, R2, R6),
+                       BPF_ALU64_REG(BPF_SUB, R2, R7),
+                       BPF_ALU64_REG(BPF_SUB, R2, R8),
+                       BPF_ALU64_REG(BPF_SUB, R2, R9),
+                       BPF_ALU64_IMM(BPF_SUB, R2, 10),
+                       BPF_ALU64_REG(BPF_SUB, R3, R0),
+                       BPF_ALU64_REG(BPF_SUB, R3, R1),
+                       BPF_ALU64_REG(BPF_SUB, R3, R2),
+                       BPF_ALU64_REG(BPF_SUB, R3, R4),
+                       BPF_ALU64_REG(BPF_SUB, R3, R5),
+                       BPF_ALU64_REG(BPF_SUB, R3, R6),
+                       BPF_ALU64_REG(BPF_SUB, R3, R7),
+                       BPF_ALU64_REG(BPF_SUB, R3, R8),
+                       BPF_ALU64_REG(BPF_SUB, R3, R9),
+                       BPF_ALU64_IMM(BPF_SUB, R3, 10),
+                       BPF_ALU64_REG(BPF_SUB, R4, R0),
+                       BPF_ALU64_REG(BPF_SUB, R4, R1),
+                       BPF_ALU64_REG(BPF_SUB, R4, R2),
+                       BPF_ALU64_REG(BPF_SUB, R4, R3),
+                       BPF_ALU64_REG(BPF_SUB, R4, R5),
+                       BPF_ALU64_REG(BPF_SUB, R4, R6),
+                       BPF_ALU64_REG(BPF_SUB, R4, R7),
+                       BPF_ALU64_REG(BPF_SUB, R4, R8),
+                       BPF_ALU64_REG(BPF_SUB, R4, R9),
+                       BPF_ALU64_IMM(BPF_SUB, R4, 10),
+                       BPF_ALU64_REG(BPF_SUB, R5, R0),
+                       BPF_ALU64_REG(BPF_SUB, R5, R1),
+                       BPF_ALU64_REG(BPF_SUB, R5, R2),
+                       BPF_ALU64_REG(BPF_SUB, R5, R3),
+                       BPF_ALU64_REG(BPF_SUB, R5, R4),
+                       BPF_ALU64_REG(BPF_SUB, R5, R6),
+                       BPF_ALU64_REG(BPF_SUB, R5, R7),
+                       BPF_ALU64_REG(BPF_SUB, R5, R8),
+                       BPF_ALU64_REG(BPF_SUB, R5, R9),
+                       BPF_ALU64_IMM(BPF_SUB, R5, 10),
+                       BPF_ALU64_REG(BPF_SUB, R6, R0),
+                       BPF_ALU64_REG(BPF_SUB, R6, R1),
+                       BPF_ALU64_REG(BPF_SUB, R6, R2),
+                       BPF_ALU64_REG(BPF_SUB, R6, R3),
+                       BPF_ALU64_REG(BPF_SUB, R6, R4),
+                       BPF_ALU64_REG(BPF_SUB, R6, R5),
+                       BPF_ALU64_REG(BPF_SUB, R6, R7),
+                       BPF_ALU64_REG(BPF_SUB, R6, R8),
+                       BPF_ALU64_REG(BPF_SUB, R6, R9),
+                       BPF_ALU64_IMM(BPF_SUB, R6, 10),
+                       BPF_ALU64_REG(BPF_SUB, R7, R0),
+                       BPF_ALU64_REG(BPF_SUB, R7, R1),
+                       BPF_ALU64_REG(BPF_SUB, R7, R2),
+                       BPF_ALU64_REG(BPF_SUB, R7, R3),
+                       BPF_ALU64_REG(BPF_SUB, R7, R4),
+                       BPF_ALU64_REG(BPF_SUB, R7, R5),
+                       BPF_ALU64_REG(BPF_SUB, R7, R6),
+                       BPF_ALU64_REG(BPF_SUB, R7, R8),
+                       BPF_ALU64_REG(BPF_SUB, R7, R9),
+                       BPF_ALU64_IMM(BPF_SUB, R7, 10),
+                       BPF_ALU64_REG(BPF_SUB, R8, R0),
+                       BPF_ALU64_REG(BPF_SUB, R8, R1),
+                       BPF_ALU64_REG(BPF_SUB, R8, R2),
+                       BPF_ALU64_REG(BPF_SUB, R8, R3),
+                       BPF_ALU64_REG(BPF_SUB, R8, R4),
+                       BPF_ALU64_REG(BPF_SUB, R8, R5),
+                       BPF_ALU64_REG(BPF_SUB, R8, R6),
+                       BPF_ALU64_REG(BPF_SUB, R8, R7),
+                       BPF_ALU64_REG(BPF_SUB, R8, R9),
+                       BPF_ALU64_IMM(BPF_SUB, R8, 10),
+                       BPF_ALU64_REG(BPF_SUB, R9, R0),
+                       BPF_ALU64_REG(BPF_SUB, R9, R1),
+                       BPF_ALU64_REG(BPF_SUB, R9, R2),
+                       BPF_ALU64_REG(BPF_SUB, R9, R3),
+                       BPF_ALU64_REG(BPF_SUB, R9, R4),
+                       BPF_ALU64_REG(BPF_SUB, R9, R5),
+                       BPF_ALU64_REG(BPF_SUB, R9, R6),
+                       BPF_ALU64_REG(BPF_SUB, R9, R7),
+                       BPF_ALU64_REG(BPF_SUB, R9, R8),
+                       BPF_ALU64_IMM(BPF_SUB, R9, 10),
+                       BPF_ALU64_IMM(BPF_SUB, R0, 10),
+                       BPF_ALU64_IMM(BPF_NEG, R0, 0),
+                       BPF_ALU64_REG(BPF_SUB, R0, R1),
+                       BPF_ALU64_REG(BPF_SUB, R0, R2),
+                       BPF_ALU64_REG(BPF_SUB, R0, R3),
+                       BPF_ALU64_REG(BPF_SUB, R0, R4),
+                       BPF_ALU64_REG(BPF_SUB, R0, R5),
+                       BPF_ALU64_REG(BPF_SUB, R0, R6),
+                       BPF_ALU64_REG(BPF_SUB, R0, R7),
+                       BPF_ALU64_REG(BPF_SUB, R0, R8),
+                       BPF_ALU64_REG(BPF_SUB, R0, R9),
+                       BPF_EXIT_INSN(),
+               },
+               SKB_INT,
+               { },
+               { { 0, 11 } }
+       },
+       {       /* Mainly checking JIT here. */
+               "INT: XOR",
+               .insns_int = {
+                       BPF_ALU64_REG(BPF_SUB, R0, R0),
+                       BPF_ALU64_REG(BPF_XOR, R1, R1),
+                       BPF_JMP_REG(BPF_JEQ, R0, R1, 1),
+                       BPF_EXIT_INSN(),
+                       BPF_ALU64_IMM(BPF_MOV, R0, 10),
+                       BPF_ALU64_IMM(BPF_MOV, R1, -1),
+                       BPF_ALU64_REG(BPF_SUB, R1, R1),
+                       BPF_ALU64_REG(BPF_XOR, R2, R2),
+                       BPF_JMP_REG(BPF_JEQ, R1, R2, 1),
+                       BPF_EXIT_INSN(),
+                       BPF_ALU64_REG(BPF_SUB, R2, R2),
+                       BPF_ALU64_REG(BPF_XOR, R3, R3),
+                       BPF_ALU64_IMM(BPF_MOV, R0, 10),
+                       BPF_ALU64_IMM(BPF_MOV, R1, -1),
+                       BPF_JMP_REG(BPF_JEQ, R2, R3, 1),
+                       BPF_EXIT_INSN(),
+                       BPF_ALU64_REG(BPF_SUB, R3, R3),
+                       BPF_ALU64_REG(BPF_XOR, R4, R4),
+                       BPF_ALU64_IMM(BPF_MOV, R2, 1),
+                       BPF_ALU64_IMM(BPF_MOV, R5, -1),
+                       BPF_JMP_REG(BPF_JEQ, R3, R4, 1),
+                       BPF_EXIT_INSN(),
+                       BPF_ALU64_REG(BPF_SUB, R4, R4),
+                       BPF_ALU64_REG(BPF_XOR, R5, R5),
+                       BPF_ALU64_IMM(BPF_MOV, R3, 1),
+                       BPF_ALU64_IMM(BPF_MOV, R7, -1),
+                       BPF_JMP_REG(BPF_JEQ, R5, R4, 1),
+                       BPF_EXIT_INSN(),
+                       BPF_ALU64_IMM(BPF_MOV, R5, 1),
+                       BPF_ALU64_REG(BPF_SUB, R5, R5),
+                       BPF_ALU64_REG(BPF_XOR, R6, R6),
+                       BPF_ALU64_IMM(BPF_MOV, R1, 1),
+                       BPF_ALU64_IMM(BPF_MOV, R8, -1),
+                       BPF_JMP_REG(BPF_JEQ, R5, R6, 1),
+                       BPF_EXIT_INSN(),
+                       BPF_ALU64_REG(BPF_SUB, R6, R6),
+                       BPF_ALU64_REG(BPF_XOR, R7, R7),
+                       BPF_JMP_REG(BPF_JEQ, R7, R6, 1),
+                       BPF_EXIT_INSN(),
+                       BPF_ALU64_REG(BPF_SUB, R7, R7),
+                       BPF_ALU64_REG(BPF_XOR, R8, R8),
+                       BPF_JMP_REG(BPF_JEQ, R7, R8, 1),
+                       BPF_EXIT_INSN(),
+                       BPF_ALU64_REG(BPF_SUB, R8, R8),
+                       BPF_ALU64_REG(BPF_XOR, R9, R9),
+                       BPF_JMP_REG(BPF_JEQ, R9, R8, 1),
+                       BPF_EXIT_INSN(),
+                       BPF_ALU64_REG(BPF_SUB, R9, R9),
+                       BPF_ALU64_REG(BPF_XOR, R0, R0),
+                       BPF_JMP_REG(BPF_JEQ, R9, R0, 1),
+                       BPF_EXIT_INSN(),
+                       BPF_ALU64_REG(BPF_SUB, R1, R1),
+                       BPF_ALU64_REG(BPF_XOR, R0, R0),
+                       BPF_JMP_REG(BPF_JEQ, R9, R0, 2),
+                       BPF_ALU64_IMM(BPF_MOV, R0, 0),
+                       BPF_EXIT_INSN(),
+                       BPF_ALU64_IMM(BPF_MOV, R0, 1),
+                       BPF_EXIT_INSN(),
+               },
+               SKB_INT,
+               { },
+               { { 0, 1 } }
+       },
+       {       /* Mainly checking JIT here. */
+               "INT: MUL",
+               .insns_int = {
+                       BPF_ALU64_IMM(BPF_MOV, R0, 11),
+                       BPF_ALU64_IMM(BPF_MOV, R1, 1),
+                       BPF_ALU64_IMM(BPF_MOV, R2, 2),
+                       BPF_ALU64_IMM(BPF_MOV, R3, 3),
+                       BPF_ALU64_IMM(BPF_MOV, R4, 4),
+                       BPF_ALU64_IMM(BPF_MOV, R5, 5),
+                       BPF_ALU64_IMM(BPF_MOV, R6, 6),
+                       BPF_ALU64_IMM(BPF_MOV, R7, 7),
+                       BPF_ALU64_IMM(BPF_MOV, R8, 8),
+                       BPF_ALU64_IMM(BPF_MOV, R9, 9),
+                       BPF_ALU64_REG(BPF_MUL, R0, R0),
+                       BPF_ALU64_REG(BPF_MUL, R0, R1),
+                       BPF_ALU64_REG(BPF_MUL, R0, R2),
+                       BPF_ALU64_REG(BPF_MUL, R0, R3),
+                       BPF_ALU64_REG(BPF_MUL, R0, R4),
+                       BPF_ALU64_REG(BPF_MUL, R0, R5),
+                       BPF_ALU64_REG(BPF_MUL, R0, R6),
+                       BPF_ALU64_REG(BPF_MUL, R0, R7),
+                       BPF_ALU64_REG(BPF_MUL, R0, R8),
+                       BPF_ALU64_REG(BPF_MUL, R0, R9),
+                       BPF_ALU64_IMM(BPF_MUL, R0, 10),
+                       BPF_JMP_IMM(BPF_JEQ, R0, 439084800, 1),
+                       BPF_EXIT_INSN(),
+                       BPF_ALU64_REG(BPF_MUL, R1, R0),
+                       BPF_ALU64_REG(BPF_MUL, R1, R2),
+                       BPF_ALU64_REG(BPF_MUL, R1, R3),
+                       BPF_ALU64_REG(BPF_MUL, R1, R4),
+                       BPF_ALU64_REG(BPF_MUL, R1, R5),
+                       BPF_ALU64_REG(BPF_MUL, R1, R6),
+                       BPF_ALU64_REG(BPF_MUL, R1, R7),
+                       BPF_ALU64_REG(BPF_MUL, R1, R8),
+                       BPF_ALU64_REG(BPF_MUL, R1, R9),
+                       BPF_ALU64_IMM(BPF_MUL, R1, 10),
+                       BPF_ALU64_REG(BPF_MOV, R2, R1),
+                       BPF_ALU64_IMM(BPF_RSH, R2, 32),
+                       BPF_JMP_IMM(BPF_JEQ, R2, 0x5a924, 1),
+                       BPF_EXIT_INSN(),
+                       BPF_ALU64_IMM(BPF_LSH, R1, 32),
+                       BPF_ALU64_IMM(BPF_ARSH, R1, 32),
+                       BPF_JMP_IMM(BPF_JEQ, R1, 0xebb90000, 1),
+                       BPF_EXIT_INSN(),
+                       BPF_ALU64_REG(BPF_MUL, R2, R0),
+                       BPF_ALU64_REG(BPF_MUL, R2, R1),
+                       BPF_ALU64_REG(BPF_MUL, R2, R3),
+                       BPF_ALU64_REG(BPF_MUL, R2, R4),
+                       BPF_ALU64_REG(BPF_MUL, R2, R5),
+                       BPF_ALU64_REG(BPF_MUL, R2, R6),
+                       BPF_ALU64_REG(BPF_MUL, R2, R7),
+                       BPF_ALU64_REG(BPF_MUL, R2, R8),
+                       BPF_ALU64_REG(BPF_MUL, R2, R9),
+                       BPF_ALU64_IMM(BPF_MUL, R2, 10),
+                       BPF_ALU64_IMM(BPF_RSH, R2, 32),
+                       BPF_ALU64_REG(BPF_MOV, R0, R2),
+                       BPF_EXIT_INSN(),
+               },
+               SKB_INT,
+               { },
+               { { 0, 0x35d97ef2 } }
+       },
+       {
+               "INT: ALU MIX",
+               .insns_int = {
+                       BPF_ALU64_IMM(BPF_MOV, R0, 11),
+                       BPF_ALU64_IMM(BPF_ADD, R0, -1),
+                       BPF_ALU64_IMM(BPF_MOV, R2, 2),
+                       BPF_ALU64_IMM(BPF_XOR, R2, 3),
+                       BPF_ALU64_REG(BPF_DIV, R0, R2),
+                       BPF_JMP_IMM(BPF_JEQ, R0, 10, 1),
+                       BPF_EXIT_INSN(),
+                       BPF_ALU64_IMM(BPF_MOD, R0, 3),
+                       BPF_JMP_IMM(BPF_JEQ, R0, 1, 1),
+                       BPF_EXIT_INSN(),
+                       BPF_ALU64_IMM(BPF_MOV, R0, -1),
+                       BPF_EXIT_INSN(),
+               },
+               SKB_INT,
+               { },
+               { { 0, -1 } }
+       },
+       {
+               "INT: DIV + ABS",
+               .insns_int = {
+                       BPF_ALU64_REG(BPF_MOV, R6, R1),
+                       BPF_LD_ABS(BPF_B, 3),
+                       BPF_ALU64_IMM(BPF_MOV, R2, 2),
+                       BPF_ALU32_REG(BPF_DIV, R0, R2),
+                       BPF_ALU64_REG(BPF_MOV, R8, R0),
+                       BPF_LD_ABS(BPF_B, 4),
+                       BPF_ALU64_REG(BPF_ADD, R8, R0),
+                       BPF_LD_IND(BPF_B, R8, -70),
+                       BPF_EXIT_INSN(),
+               },
+               SKB_INT,
+               { 10, 20, 30, 40, 50 },
+               { { 4, 0 }, { 5, 10 } }
+       },
+       {
+               "INT: DIV by zero",
+               .insns_int = {
+                       BPF_ALU64_REG(BPF_MOV, R6, R1),
+                       BPF_ALU64_IMM(BPF_MOV, R7, 0),
+                       BPF_LD_ABS(BPF_B, 3),
+                       BPF_ALU32_REG(BPF_DIV, R0, R7),
+                       BPF_EXIT_INSN(),
+               },
+               SKB_INT,
+               { 10, 20, 30, 40, 50 },
+               { { 3, 0 }, { 4, 0 } }
+       },
+       {
+               "check: missing ret",
+               .insns = {
+                       BPF_STMT(BPF_LD | BPF_IMM, 1),
+               },
+               EXPECTED_FAIL,
+               { },
+               { }
+       },
+       {
+               "check: div_k_0",
+               .insns = {
+                       BPF_STMT(BPF_ALU | BPF_DIV | BPF_K, 0),
+                       BPF_STMT(BPF_RET | BPF_K, 0)
+               },
+               EXPECTED_FAIL,
+               { },
+               { }
+       },
+       {
+               "check: unknown insn",
+               .insns = {
+                       /* seccomp insn, rejected in socket filter */
+                       BPF_STMT(BPF_LDX | BPF_W | BPF_ABS, 0),
+                       BPF_STMT(BPF_RET | BPF_K, 0)
+               },
+               EXPECTED_FAIL,
+               { },
+               { }
+       },
+       {
+               "check: out of range spill/fill",
+               .insns = {
+                       BPF_STMT(BPF_STX, 16),
+                       BPF_STMT(BPF_RET | BPF_K, 0)
+               },
+               EXPECTED_FAIL,
+               { },
+               { }
+       },
+};
+
+static int get_length(struct sock_filter *fp)
+{
+       int len = 0;
+
+       while (fp->code != 0 || fp->k != 0) {
+               fp++;
+               len++;
+       }
+
+       return len;
+}
+
+struct net_device dev;
+struct sk_buff *populate_skb(char *buf, int size)
+{
+       struct sk_buff *skb;
+
+       if (size >= MAX_DATA)
+               return NULL;
+
+       skb = alloc_skb(MAX_DATA, GFP_KERNEL);
+       if (!skb)
+               return NULL;
+
+       memcpy(__skb_put(skb, size), buf, size);
+       skb_reset_mac_header(skb);
+       skb->protocol = htons(ETH_P_IP);
+       skb->pkt_type = SKB_TYPE;
+       skb->mark = SKB_MARK;
+       skb->hash = SKB_HASH;
+       skb->queue_mapping = SKB_QUEUE_MAP;
+       skb->vlan_tci = SKB_VLAN_TCI;
+       skb->dev = &dev;
+       skb->dev->ifindex = SKB_DEV_IFINDEX;
+       skb->dev->type = SKB_DEV_TYPE;
+       skb_set_network_header(skb, min(size, ETH_HLEN));
+
+       return skb;
+}
+
+static int run_one(struct sk_filter *fp, struct bpf_test *t)
+{
+       u64 start, finish, res, cnt = 100000;
+       int err_cnt = 0, err, i, j;
+       u32 ret = 0;
+       void *data;
+
+       for (i = 0; i < MAX_SUBTESTS; i++) {
+               if (t->test[i].data_size == 0 &&
+                   t->test[i].result == 0)
+                       break;
+               if (t->data_type == SKB ||
+                   t->data_type == SKB_INT) {
+                       data = populate_skb(t->data, t->test[i].data_size);
+                       if (!data)
+                               return -ENOMEM;
+               } else {
+                       data = NULL;
+               }
+
+               start = ktime_to_us(ktime_get());
+               for (j = 0; j < cnt; j++)
+                       ret = SK_RUN_FILTER(fp, data);
+               finish = ktime_to_us(ktime_get());
+
+               res = (finish - start) * 1000;
+               do_div(res, cnt);
+
+               err = ret != t->test[i].result;
+               if (!err)
+                       pr_cont("%lld ", res);
+
+               if (t->data_type == SKB || t->data_type == SKB_INT)
+                       kfree_skb(data);
+
+               if (err) {
+                       pr_cont("ret %d != %d ", ret, t->test[i].result);
+                       err_cnt++;
+               }
+       }
+
+       return err_cnt;
+}
+
+static __init int test_bpf(void)
+{
+       struct sk_filter *fp, *fp_ext = NULL;
+       struct sock_fprog fprog;
+       int err, i, err_cnt = 0;
+
+       for (i = 0; i < ARRAY_SIZE(tests); i++) {
+               pr_info("#%d %s ", i, tests[i].descr);
+
+               fprog.filter = tests[i].insns;
+               fprog.len = get_length(fprog.filter);
+
+               if (tests[i].data_type == SKB_INT) {
+                       fp_ext = kzalloc(4096, GFP_KERNEL);
+                       if (!fp_ext)
+                               return -ENOMEM;
+                       fp = fp_ext;
+                       memcpy(fp_ext->insns, tests[i].insns_int,
+                              fprog.len * 8);
+                       fp->len = fprog.len;
+                       sk_filter_select_runtime(fp);
+               } else {
+                       err = sk_unattached_filter_create(&fp, &fprog);
+                       if (tests[i].data_type == EXPECTED_FAIL) {
+                               if (err == -EINVAL) {
+                                       pr_cont("PASS\n");
+                                       continue;
+                               } else {
+                                       pr_cont("UNEXPECTED_PASS\n");
+                                       /* verifier didn't reject the test
+                                        * that's bad enough, just return
+                                        */
+                                       return -EINVAL;
+                               }
+                       }
+                       if (err) {
+                               pr_cont("FAIL to attach err=%d len=%d\n",
+                                       err, fprog.len);
+                               return err;
+                       }
+               }
+
+               err = run_one(fp, &tests[i]);
+
+               if (tests[i].data_type != SKB_INT)
+                       sk_unattached_filter_destroy(fp);
+               else
+                       sk_filter_free(fp);
+
+               if (err) {
+                       pr_cont("FAIL %d\n", err);
+                       err_cnt++;
+               } else {
+                       pr_cont("PASS\n");
+               }
+       }
+
+       if (err_cnt)
+               return -EINVAL;
+       else
+               return 0;
+}
+
+static int __init test_bpf_init(void)
+{
+       return test_bpf();
+}
+
+static void __exit test_bpf_exit(void)
+{
+}
+
+module_init(test_bpf_init);
+module_exit(test_bpf_exit);
+MODULE_LICENSE("GPL");
index d0f0bef3be488af9eb9406cc5d28272093abb5a6..037b812a953141f3dc77b1f7402b29bb54cd9e44 100644 (file)
@@ -232,17 +232,18 @@ void tlb_gather_mmu(struct mmu_gather *tlb, struct mm_struct *mm, unsigned long
 #endif
 }
 
-void tlb_flush_mmu(struct mmu_gather *tlb)
+static void tlb_flush_mmu_tlbonly(struct mmu_gather *tlb)
 {
-       struct mmu_gather_batch *batch;
-
-       if (!tlb->need_flush)
-               return;
        tlb->need_flush = 0;
        tlb_flush(tlb);
 #ifdef CONFIG_HAVE_RCU_TABLE_FREE
        tlb_table_flush(tlb);
 #endif
+}
+
+static void tlb_flush_mmu_free(struct mmu_gather *tlb)
+{
+       struct mmu_gather_batch *batch;
 
        for (batch = &tlb->local; batch; batch = batch->next) {
                free_pages_and_swap_cache(batch->pages, batch->nr);
@@ -251,6 +252,14 @@ void tlb_flush_mmu(struct mmu_gather *tlb)
        tlb->active = &tlb->local;
 }
 
+void tlb_flush_mmu(struct mmu_gather *tlb)
+{
+       if (!tlb->need_flush)
+               return;
+       tlb_flush_mmu_tlbonly(tlb);
+       tlb_flush_mmu_free(tlb);
+}
+
 /* tlb_finish_mmu
  *     Called at the end of the shootdown operation to free up any resources
  *     that were required.
@@ -1127,8 +1136,10 @@ again:
                        if (PageAnon(page))
                                rss[MM_ANONPAGES]--;
                        else {
-                               if (pte_dirty(ptent))
+                               if (pte_dirty(ptent)) {
+                                       force_flush = 1;
                                        set_page_dirty(page);
+                               }
                                if (pte_young(ptent) &&
                                    likely(!(vma->vm_flags & VM_SEQ_READ)))
                                        mark_page_accessed(page);
@@ -1137,9 +1148,10 @@ again:
                        page_remove_rmap(page);
                        if (unlikely(page_mapcount(page) < 0))
                                print_bad_pte(vma, addr, ptent, page);
-                       force_flush = !__tlb_remove_page(tlb, page);
-                       if (force_flush)
+                       if (unlikely(!__tlb_remove_page(tlb, page))) {
+                               force_flush = 1;
                                break;
+                       }
                        continue;
                }
                /*
@@ -1174,18 +1186,11 @@ again:
 
        add_mm_rss_vec(mm, rss);
        arch_leave_lazy_mmu_mode();
-       pte_unmap_unlock(start_pte, ptl);
 
-       /*
-        * mmu_gather ran out of room to batch pages, we break out of
-        * the PTE lock to avoid doing the potential expensive TLB invalidate
-        * and page-free while holding it.
-        */
+       /* Do the actual TLB flush before dropping ptl */
        if (force_flush) {
                unsigned long old_end;
 
-               force_flush = 0;
-
                /*
                 * Flush the TLB just for the previous segment,
                 * then update the range to be the remaining
@@ -1193,11 +1198,21 @@ again:
                 */
                old_end = tlb->end;
                tlb->end = addr;
-
-               tlb_flush_mmu(tlb);
-
+               tlb_flush_mmu_tlbonly(tlb);
                tlb->start = addr;
                tlb->end = old_end;
+       }
+       pte_unmap_unlock(start_pte, ptl);
+
+       /*
+        * If we forced a TLB flush (either due to running out of
+        * batch buffers or because we needed to flush dirty TLB
+        * entries before releasing the ptl), free the batched
+        * memory too. Restart if we didn't do everything.
+        */
+       if (force_flush) {
+               force_flush = 0;
+               tlb_flush_mmu_free(tlb);
 
                if (addr != end)
                        goto again;
@@ -1955,12 +1970,17 @@ int fixup_user_fault(struct task_struct *tsk, struct mm_struct *mm,
                     unsigned long address, unsigned int fault_flags)
 {
        struct vm_area_struct *vma;
+       vm_flags_t vm_flags;
        int ret;
 
        vma = find_extend_vma(mm, address);
        if (!vma || address < vma->vm_start)
                return -EFAULT;
 
+       vm_flags = (fault_flags & FAULT_FLAG_WRITE) ? VM_WRITE : VM_READ;
+       if (!(vm_flags & vma->vm_flags))
+               return -EFAULT;
+
        ret = handle_mm_fault(mm, vma, address, fault_flags);
        if (ret & VM_FAULT_ERROR) {
                if (ret & VM_FAULT_OOM)
index d4224b397c0e4e4492fa135c3c5ee9b224872c07..1037a3bab50529f84c9d81c383df07dbfbbda081 100644 (file)
@@ -81,10 +81,12 @@ struct vm_area_struct *vmacache_find(struct mm_struct *mm, unsigned long addr)
        for (i = 0; i < VMACACHE_SIZE; i++) {
                struct vm_area_struct *vma = current->vmacache[i];
 
-               if (vma && vma->vm_start <= addr && vma->vm_end > addr) {
-                       BUG_ON(vma->vm_mm != mm);
+               if (!vma)
+                       continue;
+               if (WARN_ON_ONCE(vma->vm_mm != mm))
+                       break;
+               if (vma->vm_start <= addr && vma->vm_end > addr)
                        return vma;
-               }
        }
 
        return NULL;
index 3c32bd257b73975a33ba104c1c3b3797d9f29843..9012b1c922b61acd28fffb7f50b4968da9293b2f 100644 (file)
@@ -63,7 +63,7 @@ bool vlan_do_receive(struct sk_buff **skbp)
 }
 
 /* Must be invoked with rcu_read_lock. */
-struct net_device *__vlan_find_dev_deep(struct net_device *dev,
+struct net_device *__vlan_find_dev_deep_rcu(struct net_device *dev,
                                        __be16 vlan_proto, u16 vlan_id)
 {
        struct vlan_info *vlan_info = rcu_dereference(dev->vlan_info);
@@ -81,13 +81,13 @@ struct net_device *__vlan_find_dev_deep(struct net_device *dev,
 
                upper_dev = netdev_master_upper_dev_get_rcu(dev);
                if (upper_dev)
-                       return __vlan_find_dev_deep(upper_dev,
+                       return __vlan_find_dev_deep_rcu(upper_dev,
                                                    vlan_proto, vlan_id);
        }
 
        return NULL;
 }
-EXPORT_SYMBOL(__vlan_find_dev_deep);
+EXPORT_SYMBOL(__vlan_find_dev_deep_rcu);
 
 struct net_device *vlan_dev_real_dev(const struct net_device *dev)
 {
index 733ec283ed1b9e85f9181f67116052f88bb49951..4181fb71ba77c8770e16ccffd8ccb0bc81066a28 100644 (file)
@@ -678,9 +678,9 @@ static netdev_features_t vlan_dev_fix_features(struct net_device *dev,
        struct net_device *real_dev = vlan_dev_priv(dev)->real_dev;
        netdev_features_t old_features = features;
 
-       features &= real_dev->vlan_features;
+       features = netdev_intersect_features(features, real_dev->vlan_features);
        features |= NETIF_F_RXCSUM;
-       features &= real_dev->features;
+       features = netdev_intersect_features(features, real_dev->features);
 
        features |= old_features & NETIF_F_SOFT_FEATURES;
        features |= NETIF_F_LLTX;
@@ -706,38 +706,36 @@ static void vlan_ethtool_get_drvinfo(struct net_device *dev,
 
 static struct rtnl_link_stats64 *vlan_dev_get_stats64(struct net_device *dev, struct rtnl_link_stats64 *stats)
 {
+       struct vlan_pcpu_stats *p;
+       u32 rx_errors = 0, tx_dropped = 0;
+       int i;
 
-       if (vlan_dev_priv(dev)->vlan_pcpu_stats) {
-               struct vlan_pcpu_stats *p;
-               u32 rx_errors = 0, tx_dropped = 0;
-               int i;
-
-               for_each_possible_cpu(i) {
-                       u64 rxpackets, rxbytes, rxmulticast, txpackets, txbytes;
-                       unsigned int start;
-
-                       p = per_cpu_ptr(vlan_dev_priv(dev)->vlan_pcpu_stats, i);
-                       do {
-                               start = u64_stats_fetch_begin_irq(&p->syncp);
-                               rxpackets       = p->rx_packets;
-                               rxbytes         = p->rx_bytes;
-                               rxmulticast     = p->rx_multicast;
-                               txpackets       = p->tx_packets;
-                               txbytes         = p->tx_bytes;
-                       } while (u64_stats_fetch_retry_irq(&p->syncp, start));
-
-                       stats->rx_packets       += rxpackets;
-                       stats->rx_bytes         += rxbytes;
-                       stats->multicast        += rxmulticast;
-                       stats->tx_packets       += txpackets;
-                       stats->tx_bytes         += txbytes;
-                       /* rx_errors & tx_dropped are u32 */
-                       rx_errors       += p->rx_errors;
-                       tx_dropped      += p->tx_dropped;
-               }
-               stats->rx_errors  = rx_errors;
-               stats->tx_dropped = tx_dropped;
+       for_each_possible_cpu(i) {
+               u64 rxpackets, rxbytes, rxmulticast, txpackets, txbytes;
+               unsigned int start;
+
+               p = per_cpu_ptr(vlan_dev_priv(dev)->vlan_pcpu_stats, i);
+               do {
+                       start = u64_stats_fetch_begin_irq(&p->syncp);
+                       rxpackets       = p->rx_packets;
+                       rxbytes         = p->rx_bytes;
+                       rxmulticast     = p->rx_multicast;
+                       txpackets       = p->tx_packets;
+                       txbytes         = p->tx_bytes;
+               } while (u64_stats_fetch_retry_irq(&p->syncp, start));
+
+               stats->rx_packets       += rxpackets;
+               stats->rx_bytes         += rxbytes;
+               stats->multicast        += rxmulticast;
+               stats->tx_packets       += txpackets;
+               stats->tx_bytes         += txbytes;
+               /* rx_errors & tx_dropped are u32 */
+               rx_errors       += p->rx_errors;
+               tx_dropped      += p->tx_dropped;
        }
+       stats->rx_errors  = rx_errors;
+       stats->tx_dropped = tx_dropped;
+
        return stats;
 }
 
index b758881be108c84bfa059cf61f15d44a9008c1c8..a12e25efaf6ff055094f843c7c5536ce861f593a 100644 (file)
@@ -245,6 +245,7 @@ static int batadv_algorithms_open(struct inode *inode, struct file *file)
 static int batadv_originators_open(struct inode *inode, struct file *file)
 {
        struct net_device *net_dev = (struct net_device *)inode->i_private;
+
        return single_open(file, batadv_orig_seq_print_text, net_dev);
 }
 
@@ -258,18 +259,21 @@ static int batadv_originators_hardif_open(struct inode *inode,
                                          struct file *file)
 {
        struct net_device *net_dev = (struct net_device *)inode->i_private;
+
        return single_open(file, batadv_orig_hardif_seq_print_text, net_dev);
 }
 
 static int batadv_gateways_open(struct inode *inode, struct file *file)
 {
        struct net_device *net_dev = (struct net_device *)inode->i_private;
+
        return single_open(file, batadv_gw_client_seq_print_text, net_dev);
 }
 
 static int batadv_transtable_global_open(struct inode *inode, struct file *file)
 {
        struct net_device *net_dev = (struct net_device *)inode->i_private;
+
        return single_open(file, batadv_tt_global_seq_print_text, net_dev);
 }
 
@@ -277,6 +281,7 @@ static int batadv_transtable_global_open(struct inode *inode, struct file *file)
 static int batadv_bla_claim_table_open(struct inode *inode, struct file *file)
 {
        struct net_device *net_dev = (struct net_device *)inode->i_private;
+
        return single_open(file, batadv_bla_claim_table_seq_print_text,
                           net_dev);
 }
@@ -285,6 +290,7 @@ static int batadv_bla_backbone_table_open(struct inode *inode,
                                          struct file *file)
 {
        struct net_device *net_dev = (struct net_device *)inode->i_private;
+
        return single_open(file, batadv_bla_backbone_table_seq_print_text,
                           net_dev);
 }
@@ -300,6 +306,7 @@ static int batadv_bla_backbone_table_open(struct inode *inode,
 static int batadv_dat_cache_open(struct inode *inode, struct file *file)
 {
        struct net_device *net_dev = (struct net_device *)inode->i_private;
+
        return single_open(file, batadv_dat_cache_seq_print_text, net_dev);
 }
 #endif
@@ -307,6 +314,7 @@ static int batadv_dat_cache_open(struct inode *inode, struct file *file)
 static int batadv_transtable_local_open(struct inode *inode, struct file *file)
 {
        struct net_device *net_dev = (struct net_device *)inode->i_private;
+
        return single_open(file, batadv_tt_local_seq_print_text, net_dev);
 }
 
@@ -319,6 +327,7 @@ struct batadv_debuginfo {
 static int batadv_nc_nodes_open(struct inode *inode, struct file *file)
 {
        struct net_device *net_dev = (struct net_device *)inode->i_private;
+
        return single_open(file, batadv_nc_nodes_seq_print_text, net_dev);
 }
 #endif
@@ -333,7 +342,7 @@ struct batadv_debuginfo batadv_debuginfo_##_name = {        \
                  .llseek = seq_lseek,                  \
                  .release = single_release,            \
                }                                       \
-};
+}
 
 /* the following attributes are general and therefore they will be directly
  * placed in the BATADV_DEBUGFS_SUBDIR subdirectory of debugfs
@@ -395,7 +404,7 @@ struct batadv_debuginfo batadv_hardif_debuginfo_##_name = { \
                .llseek = seq_lseek,                            \
                .release = single_release,                      \
        },                                                      \
-};
+}
 static BATADV_HARDIF_DEBUGINFO(originators, S_IRUGO,
                               batadv_originators_hardif_open);
 
index b25fd64d727b0d6e8227671f860b133095df5100..60889df808f3abb9c67dc41e07d2ac8273807c18 100644 (file)
@@ -662,6 +662,7 @@ static void batadv_dat_tvlv_container_update(struct batadv_priv *bat_priv)
 void batadv_dat_status_update(struct net_device *net_dev)
 {
        struct batadv_priv *bat_priv = netdev_priv(net_dev);
+
        batadv_dat_tvlv_container_update(bat_priv);
 }
 
index 770dc890ceefdb712f254b378c825cbeab255742..118b990bae25d7ecc7221330546b59a5391cd21e 100644 (file)
@@ -24,7 +24,7 @@
 #define BATADV_DRIVER_DEVICE "batman-adv"
 
 #ifndef BATADV_SOURCE_VERSION
-#define BATADV_SOURCE_VERSION "2014.2.0"
+#define BATADV_SOURCE_VERSION "2014.3.0"
 #endif
 
 /* B.A.T.M.A.N. parameters */
index a9546fe541ebb0ff8905fd3fe82d48e48302ce9e..40a2fc4bcf4c4e1887a8cfc571a13631ef1e655b 100644 (file)
@@ -86,6 +86,7 @@ static void batadv_nc_tvlv_container_update(struct batadv_priv *bat_priv)
 void batadv_nc_status_update(struct net_device *net_dev)
 {
        struct batadv_priv *bat_priv = netdev_priv(net_dev);
+
        batadv_nc_tvlv_container_update(bat_priv);
 }
 
index 744a59b85e15ded75f61da8a9fa5a8a87cdb7b8d..e7ee65dc20bf4f25a1a8d0134c66b0bfaef25bd3 100644 (file)
@@ -884,7 +884,7 @@ static void batadv_softif_init_early(struct net_device *dev)
        /* generate random address */
        eth_hw_addr_random(dev);
 
-       SET_ETHTOOL_OPS(dev, &batadv_ethtool_ops);
+       dev->ethtool_ops = &batadv_ethtool_ops;
 
        memset(priv, 0, sizeof(*priv));
 }
index 1ebb0d9e2ea547d1c263a6b09d30d81214e4ba33..fc47baa888c54896c6ccde6352202736d3c9ba6b 100644 (file)
 static struct net_device *batadv_kobj_to_netdev(struct kobject *obj)
 {
        struct device *dev = container_of(obj->parent, struct device, kobj);
+
        return to_net_dev(dev);
 }
 
 static struct batadv_priv *batadv_kobj_to_batpriv(struct kobject *obj)
 {
        struct net_device *net_dev = batadv_kobj_to_netdev(obj);
+
        return netdev_priv(net_dev);
 }
 
@@ -106,7 +108,7 @@ struct batadv_attribute batadv_attr_vlan_##_name = {        \
                 .mode = _mode },                       \
        .show   = _show,                                \
        .store  = _store,                               \
-};
+}
 
 /* Use this, if you have customized show and store functions */
 #define BATADV_ATTR(_name, _mode, _show, _store)       \
@@ -115,7 +117,7 @@ struct batadv_attribute batadv_attr_##_name = {             \
                 .mode = _mode },                       \
        .show   = _show,                                \
        .store  = _store,                               \
-};
+}
 
 #define BATADV_ATTR_SIF_STORE_BOOL(_name, _post_func)                  \
 ssize_t batadv_store_##_name(struct kobject *kobj,                     \
@@ -124,6 +126,7 @@ ssize_t batadv_store_##_name(struct kobject *kobj,                  \
 {                                                                      \
        struct net_device *net_dev = batadv_kobj_to_netdev(kobj);       \
        struct batadv_priv *bat_priv = netdev_priv(net_dev);            \
+                                                                       \
        return __batadv_store_bool_attr(buff, count, _post_func, attr,  \
                                        &bat_priv->_name, net_dev);     \
 }
@@ -133,6 +136,7 @@ ssize_t batadv_show_##_name(struct kobject *kobj,                   \
                            struct attribute *attr, char *buff)         \
 {                                                                      \
        struct batadv_priv *bat_priv = batadv_kobj_to_batpriv(kobj);    \
+                                                                       \
        return sprintf(buff, "%s\n",                                    \
                       atomic_read(&bat_priv->_name) == 0 ?             \
                       "disabled" : "enabled");                         \
@@ -155,6 +159,7 @@ ssize_t batadv_store_##_name(struct kobject *kobj,                  \
 {                                                                      \
        struct net_device *net_dev = batadv_kobj_to_netdev(kobj);       \
        struct batadv_priv *bat_priv = netdev_priv(net_dev);            \
+                                                                       \
        return __batadv_store_uint_attr(buff, count, _min, _max,        \
                                        _post_func, attr,               \
                                        &bat_priv->_name, net_dev);     \
@@ -165,6 +170,7 @@ ssize_t batadv_show_##_name(struct kobject *kobj,                   \
                            struct attribute *attr, char *buff)         \
 {                                                                      \
        struct batadv_priv *bat_priv = batadv_kobj_to_batpriv(kobj);    \
+                                                                       \
        return sprintf(buff, "%i\n", atomic_read(&bat_priv->_name));    \
 }                                                                      \
 
@@ -188,6 +194,7 @@ ssize_t batadv_store_vlan_##_name(struct kobject *kobj,                     \
        size_t res = __batadv_store_bool_attr(buff, count, _post_func,  \
                                              attr, &vlan->_name,       \
                                              bat_priv->soft_iface);    \
+                                                                       \
        batadv_softif_vlan_free_ref(vlan);                              \
        return res;                                                     \
 }
@@ -202,6 +209,7 @@ ssize_t batadv_show_vlan_##_name(struct kobject *kobj,                      \
        size_t res = sprintf(buff, "%s\n",                              \
                             atomic_read(&vlan->_name) == 0 ?           \
                             "disabled" : "enabled");                   \
+                                                                       \
        batadv_softif_vlan_free_ref(vlan);                              \
        return res;                                                     \
 }
@@ -324,12 +332,14 @@ static ssize_t batadv_show_bat_algo(struct kobject *kobj,
                                    struct attribute *attr, char *buff)
 {
        struct batadv_priv *bat_priv = batadv_kobj_to_batpriv(kobj);
+
        return sprintf(buff, "%s\n", bat_priv->bat_algo_ops->name);
 }
 
 static void batadv_post_gw_reselect(struct net_device *net_dev)
 {
        struct batadv_priv *bat_priv = netdev_priv(net_dev);
+
        batadv_gw_reselect(bat_priv);
 }
 
index d958e2dca52fa5bb4d166e0073fa90d18729a73f..095943c02d6e4ae9643cd1099e7e648ef8de6488 100644 (file)
@@ -367,9 +367,23 @@ static void le_conn_timeout(struct work_struct *work)
 {
        struct hci_conn *conn = container_of(work, struct hci_conn,
                                             le_conn_timeout.work);
+       struct hci_dev *hdev = conn->hdev;
 
        BT_DBG("");
 
+       /* We could end up here due to having done directed advertising,
+        * so clean up the state if necessary. This should however only
+        * happen with broken hardware or if low duty cycle was used
+        * (which doesn't have a timeout of its own).
+        */
+       if (test_bit(HCI_ADVERTISING, &hdev->dev_flags)) {
+               u8 enable = 0x00;
+               hci_send_cmd(hdev, HCI_OP_LE_SET_ADV_ENABLE, sizeof(enable),
+                            &enable);
+               hci_le_conn_failed(conn, HCI_ERROR_ADVERTISING_TIMEOUT);
+               return;
+       }
+
        hci_le_create_connection_cancel(conn);
 }
 
@@ -401,6 +415,10 @@ struct hci_conn *hci_conn_add(struct hci_dev *hdev, int type, bdaddr_t *dst)
        case ACL_LINK:
                conn->pkt_type = hdev->pkt_type & ACL_PTYPE_MASK;
                break;
+       case LE_LINK:
+               /* conn->src should reflect the local identity address */
+               hci_copy_identity_address(hdev, &conn->src, &conn->src_type);
+               break;
        case SCO_LINK:
                if (lmp_esco_capable(hdev))
                        conn->pkt_type = (hdev->esco_type & SCO_ESCO_MASK) |
@@ -545,6 +563,11 @@ void hci_le_conn_failed(struct hci_conn *conn, u8 status)
         * favor of connection establishment, we should restart it.
         */
        hci_update_background_scan(hdev);
+
+       /* Re-enable advertising in case this was a failed connection
+        * attempt as a peripheral.
+        */
+       mgmt_reenable_advertising(hdev);
 }
 
 static void create_le_conn_complete(struct hci_dev *hdev, u8 status)
@@ -605,6 +628,45 @@ static void hci_req_add_le_create_conn(struct hci_request *req,
        conn->state = BT_CONNECT;
 }
 
+static void hci_req_directed_advertising(struct hci_request *req,
+                                        struct hci_conn *conn)
+{
+       struct hci_dev *hdev = req->hdev;
+       struct hci_cp_le_set_adv_param cp;
+       u8 own_addr_type;
+       u8 enable;
+
+       enable = 0x00;
+       hci_req_add(req, HCI_OP_LE_SET_ADV_ENABLE, sizeof(enable), &enable);
+
+       /* Clear the HCI_ADVERTISING bit temporarily so that the
+        * hci_update_random_address knows that it's safe to go ahead
+        * and write a new random address. The flag will be set back on
+        * as soon as the SET_ADV_ENABLE HCI command completes.
+        */
+       clear_bit(HCI_ADVERTISING, &hdev->dev_flags);
+
+       /* Set require_privacy to false so that the remote device has a
+        * chance of identifying us.
+        */
+       if (hci_update_random_address(req, false, &own_addr_type) < 0)
+               return;
+
+       memset(&cp, 0, sizeof(cp));
+       cp.type = LE_ADV_DIRECT_IND;
+       cp.own_address_type = own_addr_type;
+       cp.direct_addr_type = conn->dst_type;
+       bacpy(&cp.direct_addr, &conn->dst);
+       cp.channel_map = hdev->le_adv_channel_map;
+
+       hci_req_add(req, HCI_OP_LE_SET_ADV_PARAM, sizeof(cp), &cp);
+
+       enable = 0x01;
+       hci_req_add(req, HCI_OP_LE_SET_ADV_ENABLE, sizeof(enable), &enable);
+
+       conn->state = BT_CONNECT;
+}
+
 struct hci_conn *hci_connect_le(struct hci_dev *hdev, bdaddr_t *dst,
                                u8 dst_type, u8 sec_level, u8 auth_type)
 {
@@ -614,9 +676,6 @@ struct hci_conn *hci_connect_le(struct hci_dev *hdev, bdaddr_t *dst,
        struct hci_request req;
        int err;
 
-       if (test_bit(HCI_ADVERTISING, &hdev->flags))
-               return ERR_PTR(-ENOTSUPP);
-
        /* Some devices send ATT messages as soon as the physical link is
         * established. To be able to handle these ATT messages, the user-
         * space first establishes the connection and then starts the pairing
@@ -664,13 +723,20 @@ struct hci_conn *hci_connect_le(struct hci_dev *hdev, bdaddr_t *dst,
                return ERR_PTR(-ENOMEM);
 
        conn->dst_type = dst_type;
-
-       conn->out = true;
-       conn->link_mode |= HCI_LM_MASTER;
        conn->sec_level = BT_SECURITY_LOW;
        conn->pending_sec_level = sec_level;
        conn->auth_type = auth_type;
 
+       hci_req_init(&req, hdev);
+
+       if (test_bit(HCI_ADVERTISING, &hdev->dev_flags)) {
+               hci_req_directed_advertising(&req, conn);
+               goto create_conn;
+       }
+
+       conn->out = true;
+       conn->link_mode |= HCI_LM_MASTER;
+
        params = hci_conn_params_lookup(hdev, &conn->dst, conn->dst_type);
        if (params) {
                conn->le_conn_min_interval = params->conn_min_interval;
@@ -680,8 +746,6 @@ struct hci_conn *hci_connect_le(struct hci_dev *hdev, bdaddr_t *dst,
                conn->le_conn_max_interval = hdev->le_conn_max_interval;
        }
 
-       hci_req_init(&req, hdev);
-
        /* If controller is scanning, we stop it since some controllers are
         * not able to scan and connect at the same time. Also set the
         * HCI_LE_SCAN_INTERRUPTED flag so that the command complete
@@ -695,6 +759,7 @@ struct hci_conn *hci_connect_le(struct hci_dev *hdev, bdaddr_t *dst,
 
        hci_req_add_le_create_conn(&req, conn);
 
+create_conn:
        err = hci_req_run(&req, create_le_conn_complete);
        if (err) {
                hci_conn_del(conn);
@@ -819,14 +884,17 @@ static int hci_conn_auth(struct hci_conn *conn, __u8 sec_level, __u8 auth_type)
        if (!test_and_set_bit(HCI_CONN_AUTH_PEND, &conn->flags)) {
                struct hci_cp_auth_requested cp;
 
-               /* encrypt must be pending if auth is also pending */
-               set_bit(HCI_CONN_ENCRYPT_PEND, &conn->flags);
-
                cp.handle = cpu_to_le16(conn->handle);
                hci_send_cmd(conn->hdev, HCI_OP_AUTH_REQUESTED,
                             sizeof(cp), &cp);
+
+               /* If we're already encrypted set the REAUTH_PEND flag,
+                * otherwise set the ENCRYPT_PEND.
+                */
                if (conn->key_type != 0xff)
                        set_bit(HCI_CONN_REAUTH_PEND, &conn->flags);
+               else
+                       set_bit(HCI_CONN_ENCRYPT_PEND, &conn->flags);
        }
 
        return 0;
index 1c6ffaa8902f5e9fe32a86f2a19cf9073d6f0dae..d31f144860d127cdf223f2145185feb963e011c4 100644 (file)
@@ -955,14 +955,9 @@ static ssize_t le_auto_conn_write(struct file *file, const char __user *data,
        if (count < 3)
                return -EINVAL;
 
-       buf = kzalloc(count, GFP_KERNEL);
-       if (!buf)
-               return -ENOMEM;
-
-       if (copy_from_user(buf, data, count)) {
-               err = -EFAULT;
-               goto done;
-       }
+       buf = memdup_user(data, count);
+       if (IS_ERR(buf))
+               return PTR_ERR(buf);
 
        if (memcmp(buf, "add", 3) == 0) {
                n = sscanf(&buf[4], "%hhx:%hhx:%hhx:%hhx:%hhx:%hhx %hhu %hhu",
@@ -1828,6 +1823,9 @@ static int __hci_init(struct hci_dev *hdev)
                                    &lowpan_debugfs_fops);
                debugfs_create_file("le_auto_conn", 0644, hdev->debugfs, hdev,
                                    &le_auto_conn_fops);
+               debugfs_create_u16("discov_interleaved_timeout", 0644,
+                                  hdev->debugfs,
+                                  &hdev->discov_interleaved_timeout);
        }
 
        return 0;
@@ -2033,12 +2031,11 @@ bool hci_inquiry_cache_update(struct hci_dev *hdev, struct inquiry_data *data,
 
        hci_remove_remote_oob_data(hdev, &data->bdaddr);
 
-       if (ssp)
-               *ssp = data->ssp_mode;
+       *ssp = data->ssp_mode;
 
        ie = hci_inquiry_cache_lookup(hdev, &data->bdaddr);
        if (ie) {
-               if (ie->data.ssp_mode && ssp)
+               if (ie->data.ssp_mode)
                        *ssp = true;
 
                if (ie->name_state == NAME_NEEDED &&
@@ -3791,6 +3788,7 @@ struct hci_dev *hci_alloc_dev(void)
        hdev->le_conn_max_interval = 0x0038;
 
        hdev->rpa_timeout = HCI_DEFAULT_RPA_TIMEOUT;
+       hdev->discov_interleaved_timeout = DISCOV_INTERLEAVED_TIMEOUT;
 
        mutex_init(&hdev->lock);
        mutex_init(&hdev->req_lock);
index 49774912cb01f23ef6f85cb26f8613f538edec94..ca19fd4bbb8f198ed5d3a3b959c35aceb3a1e54e 100644 (file)
@@ -991,10 +991,25 @@ static void hci_cc_le_set_adv_enable(struct hci_dev *hdev, struct sk_buff *skb)
        if (!sent)
                return;
 
+       if (status)
+               return;
+
        hci_dev_lock(hdev);
 
-       if (!status)
-               mgmt_advertising(hdev, *sent);
+       /* If we're doing connection initation as peripheral. Set a
+        * timeout in case something goes wrong.
+        */
+       if (*sent) {
+               struct hci_conn *conn;
+
+               conn = hci_conn_hash_lookup_state(hdev, LE_LINK, BT_CONNECT);
+               if (conn)
+                       queue_delayed_work(hdev->workqueue,
+                                          &conn->le_conn_timeout,
+                                          HCI_LE_CONN_TIMEOUT);
+       }
+
+       mgmt_advertising(hdev, *sent);
 
        hci_dev_unlock(hdev);
 }
@@ -1018,6 +1033,33 @@ static void hci_cc_le_set_scan_param(struct hci_dev *hdev, struct sk_buff *skb)
        hci_dev_unlock(hdev);
 }
 
+static bool has_pending_adv_report(struct hci_dev *hdev)
+{
+       struct discovery_state *d = &hdev->discovery;
+
+       return bacmp(&d->last_adv_addr, BDADDR_ANY);
+}
+
+static void clear_pending_adv_report(struct hci_dev *hdev)
+{
+       struct discovery_state *d = &hdev->discovery;
+
+       bacpy(&d->last_adv_addr, BDADDR_ANY);
+       d->last_adv_data_len = 0;
+}
+
+static void store_pending_adv_report(struct hci_dev *hdev, bdaddr_t *bdaddr,
+                                    u8 bdaddr_type, s8 rssi, u8 *data, u8 len)
+{
+       struct discovery_state *d = &hdev->discovery;
+
+       bacpy(&d->last_adv_addr, bdaddr);
+       d->last_adv_addr_type = bdaddr_type;
+       d->last_adv_rssi = rssi;
+       memcpy(d->last_adv_data, data, len);
+       d->last_adv_data_len = len;
+}
+
 static void hci_cc_le_set_scan_enable(struct hci_dev *hdev,
                                      struct sk_buff *skb)
 {
@@ -1036,9 +1078,25 @@ static void hci_cc_le_set_scan_enable(struct hci_dev *hdev,
        switch (cp->enable) {
        case LE_SCAN_ENABLE:
                set_bit(HCI_LE_SCAN, &hdev->dev_flags);
+               if (hdev->le_scan_type == LE_SCAN_ACTIVE)
+                       clear_pending_adv_report(hdev);
                break;
 
        case LE_SCAN_DISABLE:
+               /* We do this here instead of when setting DISCOVERY_STOPPED
+                * since the latter would potentially require waiting for
+                * inquiry to stop too.
+                */
+               if (has_pending_adv_report(hdev)) {
+                       struct discovery_state *d = &hdev->discovery;
+
+                       mgmt_device_found(hdev, &d->last_adv_addr, LE_LINK,
+                                         d->last_adv_addr_type, NULL,
+                                         d->last_adv_rssi, 0, 1,
+                                         d->last_adv_data,
+                                         d->last_adv_data_len, NULL, 0);
+               }
+
                /* Cancel this timer so that we don't try to disable scanning
                 * when it's already disabled.
                 */
@@ -1827,7 +1885,7 @@ static void hci_inquiry_result_evt(struct hci_dev *hdev, struct sk_buff *skb)
                name_known = hci_inquiry_cache_update(hdev, &data, false, &ssp);
                mgmt_device_found(hdev, &info->bdaddr, ACL_LINK, 0x00,
                                  info->dev_class, 0, !name_known, ssp, NULL,
-                                 0);
+                                 0, NULL, 0);
        }
 
        hci_dev_unlock(hdev);
@@ -3102,7 +3160,7 @@ static void hci_inquiry_result_with_rssi_evt(struct hci_dev *hdev,
                                                              false, &ssp);
                        mgmt_device_found(hdev, &info->bdaddr, ACL_LINK, 0x00,
                                          info->dev_class, info->rssi,
-                                         !name_known, ssp, NULL, 0);
+                                         !name_known, ssp, NULL, 0, NULL, 0);
                }
        } else {
                struct inquiry_info_with_rssi *info = (void *) (skb->data + 1);
@@ -3120,7 +3178,7 @@ static void hci_inquiry_result_with_rssi_evt(struct hci_dev *hdev,
                                                              false, &ssp);
                        mgmt_device_found(hdev, &info->bdaddr, ACL_LINK, 0x00,
                                          info->dev_class, info->rssi,
-                                         !name_known, ssp, NULL, 0);
+                                         !name_known, ssp, NULL, 0, NULL, 0);
                }
        }
 
@@ -3309,7 +3367,7 @@ static void hci_extended_inquiry_result_evt(struct hci_dev *hdev,
                eir_len = eir_get_length(info->data, sizeof(info->data));
                mgmt_device_found(hdev, &info->bdaddr, ACL_LINK, 0x00,
                                  info->dev_class, info->rssi, !name_known,
-                                 ssp, info->data, eir_len);
+                                 ssp, info->data, eir_len, NULL, 0);
        }
 
        hci_dev_unlock(hdev);
@@ -3330,6 +3388,12 @@ static void hci_key_refresh_complete_evt(struct hci_dev *hdev,
        if (!conn)
                goto unlock;
 
+       /* For BR/EDR the necessary steps are taken through the
+        * auth_complete event.
+        */
+       if (conn->type != LE_LINK)
+               goto unlock;
+
        if (!ev->status)
                conn->sec_level = conn->pending_sec_level;
 
@@ -3361,24 +3425,20 @@ unlock:
 
 static u8 hci_get_auth_req(struct hci_conn *conn)
 {
-       /* If remote requests dedicated bonding follow that lead */
-       if (conn->remote_auth == HCI_AT_DEDICATED_BONDING ||
-           conn->remote_auth == HCI_AT_DEDICATED_BONDING_MITM) {
-               /* If both remote and local IO capabilities allow MITM
-                * protection then require it, otherwise don't */
-               if (conn->remote_cap == HCI_IO_NO_INPUT_OUTPUT ||
-                   conn->io_capability == HCI_IO_NO_INPUT_OUTPUT)
-                       return HCI_AT_DEDICATED_BONDING;
-               else
-                       return HCI_AT_DEDICATED_BONDING_MITM;
-       }
-
        /* If remote requests no-bonding follow that lead */
        if (conn->remote_auth == HCI_AT_NO_BONDING ||
            conn->remote_auth == HCI_AT_NO_BONDING_MITM)
                return conn->remote_auth | (conn->auth_type & 0x01);
 
-       return conn->auth_type;
+       /* If both remote and local have enough IO capabilities, require
+        * MITM protection
+        */
+       if (conn->remote_cap != HCI_IO_NO_INPUT_OUTPUT &&
+           conn->io_capability != HCI_IO_NO_INPUT_OUTPUT)
+               return conn->remote_auth | 0x01;
+
+       /* No MITM protection possible so ignore remote requirement */
+       return (conn->remote_auth & ~0x01) | (conn->auth_type & 0x01);
 }
 
 static void hci_io_capa_request_evt(struct hci_dev *hdev, struct sk_buff *skb)
@@ -3408,8 +3468,21 @@ static void hci_io_capa_request_evt(struct hci_dev *hdev, struct sk_buff *skb)
                 * to DisplayYesNo as it is not supported by BT spec. */
                cp.capability = (conn->io_capability == 0x04) ?
                                HCI_IO_DISPLAY_YESNO : conn->io_capability;
-               conn->auth_type = hci_get_auth_req(conn);
-               cp.authentication = conn->auth_type;
+
+               /* If we are initiators, there is no remote information yet */
+               if (conn->remote_auth == 0xff) {
+                       cp.authentication = conn->auth_type;
+
+                       /* Request MITM protection if our IO caps allow it
+                        * except for the no-bonding case
+                        */
+                       if (conn->io_capability != HCI_IO_NO_INPUT_OUTPUT &&
+                           cp.authentication != HCI_AT_NO_BONDING)
+                               cp.authentication |= 0x01;
+               } else {
+                       conn->auth_type = hci_get_auth_req(conn);
+                       cp.authentication = conn->auth_type;
+               }
 
                if (hci_find_remote_oob_data(hdev, &conn->dst) &&
                    (conn->out || test_bit(HCI_CONN_REMOTE_OOB, &conn->flags)))
@@ -3477,12 +3550,9 @@ static void hci_user_confirm_request_evt(struct hci_dev *hdev,
        rem_mitm = (conn->remote_auth & 0x01);
 
        /* If we require MITM but the remote device can't provide that
-        * (it has NoInputNoOutput) then reject the confirmation
-        * request. The only exception is when we're dedicated bonding
-        * initiators (connect_cfm_cb set) since then we always have the MITM
-        * bit set. */
-       if (!conn->connect_cfm_cb && loc_mitm &&
-           conn->remote_cap == HCI_IO_NO_INPUT_OUTPUT) {
+        * (it has NoInputNoOutput) then reject the confirmation request
+        */
+       if (loc_mitm && conn->remote_cap == HCI_IO_NO_INPUT_OUTPUT) {
                BT_DBG("Rejecting request: remote device can't provide MITM");
                hci_send_cmd(hdev, HCI_OP_USER_CONFIRM_NEG_REPLY,
                             sizeof(ev->bdaddr), &ev->bdaddr);
@@ -3840,17 +3910,6 @@ static void hci_le_conn_complete_evt(struct hci_dev *hdev, struct sk_buff *skb)
 
                conn->dst_type = ev->bdaddr_type;
 
-               /* The advertising parameters for own address type
-                * define which source address and source address
-                * type this connections has.
-                */
-               if (bacmp(&conn->src, BDADDR_ANY)) {
-                       conn->src_type = ADDR_LE_DEV_PUBLIC;
-               } else {
-                       bacpy(&conn->src, &hdev->static_addr);
-                       conn->src_type = ADDR_LE_DEV_RANDOM;
-               }
-
                if (ev->role == LE_CONN_ROLE_MASTER) {
                        conn->out = true;
                        conn->link_mode |= HCI_LM_MASTER;
@@ -3875,27 +3934,24 @@ static void hci_le_conn_complete_evt(struct hci_dev *hdev, struct sk_buff *skb)
                                                          &conn->init_addr,
                                                          &conn->init_addr_type);
                        }
-               } else {
-                       /* Set the responder (our side) address type based on
-                        * the advertising address type.
-                        */
-                       conn->resp_addr_type = hdev->adv_addr_type;
-                       if (hdev->adv_addr_type == ADDR_LE_DEV_RANDOM)
-                               bacpy(&conn->resp_addr, &hdev->random_addr);
-                       else
-                               bacpy(&conn->resp_addr, &hdev->bdaddr);
-
-                       conn->init_addr_type = ev->bdaddr_type;
-                       bacpy(&conn->init_addr, &ev->bdaddr);
                }
        } else {
                cancel_delayed_work(&conn->le_conn_timeout);
        }
 
-       /* Ensure that the hci_conn contains the identity address type
-        * regardless of which address the connection was made with.
-        */
-       hci_copy_identity_address(hdev, &conn->src, &conn->src_type);
+       if (!conn->out) {
+               /* Set the responder (our side) address type based on
+                * the advertising address type.
+                */
+               conn->resp_addr_type = hdev->adv_addr_type;
+               if (hdev->adv_addr_type == ADDR_LE_DEV_RANDOM)
+                       bacpy(&conn->resp_addr, &hdev->random_addr);
+               else
+                       bacpy(&conn->resp_addr, &hdev->bdaddr);
+
+               conn->init_addr_type = ev->bdaddr_type;
+               bacpy(&conn->init_addr, &ev->bdaddr);
+       }
 
        /* Lookup the identity address from the stored connection
         * address and address type.
@@ -3975,25 +4031,97 @@ static void check_pending_le_conn(struct hci_dev *hdev, bdaddr_t *addr,
        }
 }
 
+static void process_adv_report(struct hci_dev *hdev, u8 type, bdaddr_t *bdaddr,
+                              u8 bdaddr_type, s8 rssi, u8 *data, u8 len)
+{
+       struct discovery_state *d = &hdev->discovery;
+       bool match;
+
+       /* Passive scanning shouldn't trigger any device found events */
+       if (hdev->le_scan_type == LE_SCAN_PASSIVE) {
+               if (type == LE_ADV_IND || type == LE_ADV_DIRECT_IND)
+                       check_pending_le_conn(hdev, bdaddr, bdaddr_type);
+               return;
+       }
+
+       /* If there's nothing pending either store the data from this
+        * event or send an immediate device found event if the data
+        * should not be stored for later.
+        */
+       if (!has_pending_adv_report(hdev)) {
+               /* If the report will trigger a SCAN_REQ store it for
+                * later merging.
+                */
+               if (type == LE_ADV_IND || type == LE_ADV_SCAN_IND) {
+                       store_pending_adv_report(hdev, bdaddr, bdaddr_type,
+                                                rssi, data, len);
+                       return;
+               }
+
+               mgmt_device_found(hdev, bdaddr, LE_LINK, bdaddr_type, NULL,
+                                 rssi, 0, 1, data, len, NULL, 0);
+               return;
+       }
+
+       /* Check if the pending report is for the same device as the new one */
+       match = (!bacmp(bdaddr, &d->last_adv_addr) &&
+                bdaddr_type == d->last_adv_addr_type);
+
+       /* If the pending data doesn't match this report or this isn't a
+        * scan response (e.g. we got a duplicate ADV_IND) then force
+        * sending of the pending data.
+        */
+       if (type != LE_ADV_SCAN_RSP || !match) {
+               /* Send out whatever is in the cache, but skip duplicates */
+               if (!match)
+                       mgmt_device_found(hdev, &d->last_adv_addr, LE_LINK,
+                                         d->last_adv_addr_type, NULL,
+                                         d->last_adv_rssi, 0, 1,
+                                         d->last_adv_data,
+                                         d->last_adv_data_len, NULL, 0);
+
+               /* If the new report will trigger a SCAN_REQ store it for
+                * later merging.
+                */
+               if (type == LE_ADV_IND || type == LE_ADV_SCAN_IND) {
+                       store_pending_adv_report(hdev, bdaddr, bdaddr_type,
+                                                rssi, data, len);
+                       return;
+               }
+
+               /* The advertising reports cannot be merged, so clear
+                * the pending report and send out a device found event.
+                */
+               clear_pending_adv_report(hdev);
+               mgmt_device_found(hdev, bdaddr, LE_LINK, bdaddr_type, NULL,
+                                 rssi, 0, 1, data, len, NULL, 0);
+               return;
+       }
+
+       /* If we get here we've got a pending ADV_IND or ADV_SCAN_IND and
+        * the new event is a SCAN_RSP. We can therefore proceed with
+        * sending a merged device found event.
+        */
+       mgmt_device_found(hdev, &d->last_adv_addr, LE_LINK,
+                         d->last_adv_addr_type, NULL, rssi, 0, 1, data, len,
+                         d->last_adv_data, d->last_adv_data_len);
+       clear_pending_adv_report(hdev);
+}
+
 static void hci_le_adv_report_evt(struct hci_dev *hdev, struct sk_buff *skb)
 {
        u8 num_reports = skb->data[0];
        void *ptr = &skb->data[1];
-       s8 rssi;
 
        hci_dev_lock(hdev);
 
        while (num_reports--) {
                struct hci_ev_le_advertising_info *ev = ptr;
-
-               if (ev->evt_type == LE_ADV_IND ||
-                   ev->evt_type == LE_ADV_DIRECT_IND)
-                       check_pending_le_conn(hdev, &ev->bdaddr,
-                                             ev->bdaddr_type);
+               s8 rssi;
 
                rssi = ev->data[ev->length];
-               mgmt_device_found(hdev, &ev->bdaddr, LE_LINK, ev->bdaddr_type,
-                                 NULL, rssi, 0, 1, ev->data, ev->length);
+               process_adv_report(hdev, ev->evt_type, &ev->bdaddr,
+                                  ev->bdaddr_type, rssi, ev->data, ev->length);
 
                ptr += sizeof(*ev) + ev->length + 1;
        }
index b9a418e578e0000ec4ad68734b3e34575cd759b5..f608bffdb8b940915ed160b18603c0733aee63a0 100644 (file)
@@ -524,16 +524,7 @@ static int hci_sock_bound_ioctl(struct sock *sk, unsigned int cmd,
        case HCISETRAW:
                if (!capable(CAP_NET_ADMIN))
                        return -EPERM;
-
-               if (test_bit(HCI_QUIRK_RAW_DEVICE, &hdev->quirks))
-                       return -EPERM;
-
-               if (arg)
-                       set_bit(HCI_RAW, &hdev->flags);
-               else
-                       clear_bit(HCI_RAW, &hdev->flags);
-
-               return 0;
+               return -EOPNOTSUPP;
 
        case HCIGETCONNINFO:
                return hci_get_conn_info(hdev, (void __user *) arg);
index b3fbc73516c415ee1654eb6f2aa38d5e390fb00b..941ad7530eda48f21dcf7faef9167cbce6574a8e 100644 (file)
@@ -58,6 +58,7 @@ int bt_to_errno(__u16 code)
                return EIO;
 
        case 0x04:
+       case 0x3c:
                return EHOSTDOWN;
 
        case 0x05:
index d2d4e0d5aed017366668bf263538baf332255d20..54abbce3a39e8bc0cdbd9018ca8e616e5c864546 100644 (file)
@@ -2850,10 +2850,7 @@ static int pair_device(struct sock *sk, struct hci_dev *hdev, void *data,
        }
 
        sec_level = BT_SECURITY_MEDIUM;
-       if (cp->io_cap == 0x03)
-               auth_type = HCI_AT_DEDICATED_BONDING;
-       else
-               auth_type = HCI_AT_DEDICATED_BONDING_MITM;
+       auth_type = HCI_AT_DEDICATED_BONDING;
 
        if (cp->addr.type == BDADDR_BREDR) {
                conn = hci_connect_acl(hdev, &cp->addr.bdaddr, sec_level,
@@ -3351,6 +3348,8 @@ static int mgmt_start_discovery_failed(struct hci_dev *hdev, u8 status)
 
 static void start_discovery_complete(struct hci_dev *hdev, u8 status)
 {
+       unsigned long timeout = 0;
+
        BT_DBG("status %d", status);
 
        if (status) {
@@ -3366,13 +3365,11 @@ static void start_discovery_complete(struct hci_dev *hdev, u8 status)
 
        switch (hdev->discovery.type) {
        case DISCOV_TYPE_LE:
-               queue_delayed_work(hdev->workqueue, &hdev->le_scan_disable,
-                                  DISCOV_LE_TIMEOUT);
+               timeout = msecs_to_jiffies(DISCOV_LE_TIMEOUT);
                break;
 
        case DISCOV_TYPE_INTERLEAVED:
-               queue_delayed_work(hdev->workqueue, &hdev->le_scan_disable,
-                                  DISCOV_INTERLEAVED_TIMEOUT);
+               timeout = msecs_to_jiffies(hdev->discov_interleaved_timeout);
                break;
 
        case DISCOV_TYPE_BREDR:
@@ -3381,6 +3378,11 @@ static void start_discovery_complete(struct hci_dev *hdev, u8 status)
        default:
                BT_ERR("Invalid discovery type %d", hdev->discovery.type);
        }
+
+       if (!timeout)
+               return;
+
+       queue_delayed_work(hdev->workqueue, &hdev->le_scan_disable, timeout);
 }
 
 static int start_discovery(struct sock *sk, struct hci_dev *hdev,
@@ -5668,8 +5670,9 @@ void mgmt_read_local_oob_data_complete(struct hci_dev *hdev, u8 *hash192,
 }
 
 void mgmt_device_found(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 link_type,
-                      u8 addr_type, u8 *dev_class, s8 rssi, u8 cfm_name, u8
-                      ssp, u8 *eir, u16 eir_len)
+                      u8 addr_type, u8 *dev_class, s8 rssi, u8 cfm_name,
+                      u8 ssp, u8 *eir, u16 eir_len, u8 *scan_rsp,
+                      u8 scan_rsp_len)
 {
        char buf[512];
        struct mgmt_ev_device_found *ev = (void *) buf;
@@ -5679,8 +5682,10 @@ void mgmt_device_found(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 link_type,
        if (!hci_discovery_active(hdev))
                return;
 
-       /* Leave 5 bytes for a potential CoD field */
-       if (sizeof(*ev) + eir_len + 5 > sizeof(buf))
+       /* Make sure that the buffer is big enough. The 5 extra bytes
+        * are for the potential CoD field.
+        */
+       if (sizeof(*ev) + eir_len + scan_rsp_len + 5 > sizeof(buf))
                return;
 
        memset(buf, 0, sizeof(buf));
@@ -5707,8 +5712,11 @@ void mgmt_device_found(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 link_type,
                eir_len = eir_append_data(ev->eir, eir_len, EIR_CLASS_OF_DEV,
                                          dev_class, 3);
 
-       ev->eir_len = cpu_to_le16(eir_len);
-       ev_size = sizeof(*ev) + eir_len;
+       if (scan_rsp_len > 0)
+               memcpy(ev->eir + eir_len, scan_rsp, scan_rsp_len);
+
+       ev->eir_len = cpu_to_le16(eir_len + scan_rsp_len);
+       ev_size = sizeof(*ev) + eir_len + scan_rsp_len;
 
        mgmt_event(MGMT_EV_DEVICE_FOUND, hdev, ev, ev_size, NULL);
 }
index e85498b2f1669f7dae3b7f5f5e1c0970b1be65c6..29b6e2a8ca9e3153197f9f50eb2b657f90b68974 100644 (file)
@@ -5,7 +5,7 @@
 obj-$(CONFIG_BRIDGE) += bridge.o
 
 bridge-y       := br.o br_device.o br_fdb.o br_forward.o br_if.o br_input.o \
-                       br_ioctl.o br_notify.o br_stp.o br_stp_bpdu.o \
+                       br_ioctl.o br_stp.o br_stp_bpdu.o \
                        br_stp_if.o br_stp_timer.o br_netlink.o
 
 bridge-$(CONFIG_SYSFS) += br_sysfs_if.o br_sysfs_br.o
@@ -16,4 +16,4 @@ bridge-$(CONFIG_BRIDGE_IGMP_SNOOPING) += br_multicast.o br_mdb.o
 
 bridge-$(CONFIG_BRIDGE_VLAN_FILTERING) += br_vlan.o
 
-obj-$(CONFIG_BRIDGE_NF_EBTABLES) += netfilter/
+obj-$(CONFIG_BRIDGE_NETFILTER) += netfilter/
index 19311aafcf5a06e6ae7abaaf82527711573dc6d6..1a755a1e54101d924e88ea240a82c154dcb7bbe5 100644 (file)
 
 #include "br_private.h"
 
+/*
+ * Handle changes in state of network devices enslaved to a bridge.
+ *
+ * Note: don't care about up/down if bridge itself is down, because
+ *     port state is checked when bridge is brought up.
+ */
+static int br_device_event(struct notifier_block *unused, unsigned long event, void *ptr)
+{
+       struct net_device *dev = netdev_notifier_info_to_dev(ptr);
+       struct net_bridge_port *p;
+       struct net_bridge *br;
+       bool changed_addr;
+       int err;
+
+       /* register of bridge completed, add sysfs entries */
+       if ((dev->priv_flags & IFF_EBRIDGE) && event == NETDEV_REGISTER) {
+               br_sysfs_addbr(dev);
+               return NOTIFY_DONE;
+       }
+
+       /* not a port of a bridge */
+       p = br_port_get_rtnl(dev);
+       if (!p)
+               return NOTIFY_DONE;
+
+       br = p->br;
+
+       switch (event) {
+       case NETDEV_CHANGEMTU:
+               dev_set_mtu(br->dev, br_min_mtu(br));
+               break;
+
+       case NETDEV_CHANGEADDR:
+               spin_lock_bh(&br->lock);
+               br_fdb_changeaddr(p, dev->dev_addr);
+               changed_addr = br_stp_recalculate_bridge_id(br);
+               spin_unlock_bh(&br->lock);
+
+               if (changed_addr)
+                       call_netdevice_notifiers(NETDEV_CHANGEADDR, br->dev);
+
+               break;
+
+       case NETDEV_CHANGE:
+               br_port_carrier_check(p);
+               break;
+
+       case NETDEV_FEAT_CHANGE:
+               netdev_update_features(br->dev);
+               break;
+
+       case NETDEV_DOWN:
+               spin_lock_bh(&br->lock);
+               if (br->dev->flags & IFF_UP)
+                       br_stp_disable_port(p);
+               spin_unlock_bh(&br->lock);
+               break;
+
+       case NETDEV_UP:
+               if (netif_running(br->dev) && netif_oper_up(dev)) {
+                       spin_lock_bh(&br->lock);
+                       br_stp_enable_port(p);
+                       spin_unlock_bh(&br->lock);
+               }
+               break;
+
+       case NETDEV_UNREGISTER:
+               br_del_if(br, dev);
+               break;
+
+       case NETDEV_CHANGENAME:
+               err = br_sysfs_renameif(p);
+               if (err)
+                       return notifier_from_errno(err);
+               break;
+
+       case NETDEV_PRE_TYPE_CHANGE:
+               /* Forbid underlaying device to change its type. */
+               return NOTIFY_BAD;
+
+       case NETDEV_RESEND_IGMP:
+               /* Propagate to master device */
+               call_netdevice_notifiers(event, br->dev);
+               break;
+       }
+
+       /* Events that may cause spanning tree to refresh */
+       if (event == NETDEV_CHANGEADDR || event == NETDEV_UP ||
+           event == NETDEV_CHANGE || event == NETDEV_DOWN)
+               br_ifinfo_notify(RTM_NEWLINK, p);
+
+       return NOTIFY_DONE;
+}
+
+static struct notifier_block br_device_notifier = {
+       .notifier_call = br_device_event
+};
+
 static void __net_exit br_net_exit(struct net *net)
 {
        struct net_device *dev;
index 3e2da2cb72db1725f064ec21d3ce6ab8765532c1..d77e2f0ff0e9d5d9644a3f4df02446be6fea15cb 100644 (file)
@@ -112,6 +112,12 @@ static void br_dev_set_multicast_list(struct net_device *dev)
 {
 }
 
+static void br_dev_change_rx_flags(struct net_device *dev, int change)
+{
+       if (change & IFF_PROMISC)
+               br_manage_promisc(netdev_priv(dev));
+}
+
 static int br_dev_stop(struct net_device *dev)
 {
        struct net_bridge *br = netdev_priv(dev);
@@ -309,6 +315,7 @@ static const struct net_device_ops br_netdev_ops = {
        .ndo_get_stats64         = br_get_stats64,
        .ndo_set_mac_address     = br_set_mac_address,
        .ndo_set_rx_mode         = br_dev_set_multicast_list,
+       .ndo_change_rx_flags     = br_dev_change_rx_flags,
        .ndo_change_mtu          = br_change_mtu,
        .ndo_do_ioctl            = br_dev_ioctl,
 #ifdef CONFIG_NET_POLL_CONTROLLER
@@ -348,7 +355,7 @@ void br_dev_setup(struct net_device *dev)
 
        dev->netdev_ops = &br_netdev_ops;
        dev->destructor = br_dev_free;
-       SET_ETHTOOL_OPS(dev, &br_ethtool_ops);
+       dev->ethtool_ops = &br_ethtool_ops;
        SET_NETDEV_DEVTYPE(dev, &br_type);
        dev->tx_queue_len = 0;
        dev->priv_flags = IFF_EBRIDGE;
index 9203d5a1943fbd4ba272ae38e742d8692093f7f1..648d0e84959567c09f0692675a4742a4c939c3d8 100644 (file)
@@ -85,8 +85,58 @@ static void fdb_rcu_free(struct rcu_head *head)
        kmem_cache_free(br_fdb_cache, ent);
 }
 
+/* When a static FDB entry is added, the mac address from the entry is
+ * added to the bridge private HW address list and all required ports
+ * are then updated with the new information.
+ * Called under RTNL.
+ */
+static void fdb_add_hw(struct net_bridge *br, const unsigned char *addr)
+{
+       int err;
+       struct net_bridge_port *p, *tmp;
+
+       ASSERT_RTNL();
+
+       list_for_each_entry(p, &br->port_list, list) {
+               if (!br_promisc_port(p)) {
+                       err = dev_uc_add(p->dev, addr);
+                       if (err)
+                               goto undo;
+               }
+       }
+
+       return;
+undo:
+       list_for_each_entry(tmp, &br->port_list, list) {
+               if (tmp == p)
+                       break;
+               if (!br_promisc_port(tmp))
+                       dev_uc_del(tmp->dev, addr);
+       }
+}
+
+/* When a static FDB entry is deleted, the HW address from that entry is
+ * also removed from the bridge private HW address list and updates all
+ * the ports with needed information.
+ * Called under RTNL.
+ */
+static void fdb_del_hw(struct net_bridge *br, const unsigned char *addr)
+{
+       struct net_bridge_port *p;
+
+       ASSERT_RTNL();
+
+       list_for_each_entry(p, &br->port_list, list) {
+               if (!br_promisc_port(p))
+                       dev_uc_del(p->dev, addr);
+       }
+}
+
 static void fdb_delete(struct net_bridge *br, struct net_bridge_fdb_entry *f)
 {
+       if (f->is_static)
+               fdb_del_hw(br, f->addr.addr);
+
        hlist_del_rcu(&f->hlist);
        fdb_notify(br, f, RTM_DELNEIGH);
        call_rcu(&f->rcu, fdb_rcu_free);
@@ -466,6 +516,7 @@ static int fdb_insert(struct net_bridge *br, struct net_bridge_port *source,
                return -ENOMEM;
 
        fdb->is_local = fdb->is_static = 1;
+       fdb_add_hw(br, addr);
        fdb_notify(br, fdb, RTM_NEWNEIGH);
        return 0;
 }
@@ -678,13 +729,25 @@ static int fdb_add_entry(struct net_bridge_port *source, const __u8 *addr,
        }
 
        if (fdb_to_nud(fdb) != state) {
-               if (state & NUD_PERMANENT)
-                       fdb->is_local = fdb->is_static = 1;
-               else if (state & NUD_NOARP) {
+               if (state & NUD_PERMANENT) {
+                       fdb->is_local = 1;
+                       if (!fdb->is_static) {
+                               fdb->is_static = 1;
+                               fdb_add_hw(br, addr);
+                       }
+               } else if (state & NUD_NOARP) {
+                       fdb->is_local = 0;
+                       if (!fdb->is_static) {
+                               fdb->is_static = 1;
+                               fdb_add_hw(br, addr);
+                       }
+               } else {
                        fdb->is_local = 0;
-                       fdb->is_static = 1;
-               } else
-                       fdb->is_local = fdb->is_static = 0;
+                       if (fdb->is_static) {
+                               fdb->is_static = 0;
+                               fdb_del_hw(br, addr);
+                       }
+               }
 
                modified = true;
        }
@@ -874,3 +937,59 @@ int br_fdb_delete(struct ndmsg *ndm, struct nlattr *tb[],
 out:
        return err;
 }
+
+int br_fdb_sync_static(struct net_bridge *br, struct net_bridge_port *p)
+{
+       struct net_bridge_fdb_entry *fdb, *tmp;
+       int i;
+       int err;
+
+       ASSERT_RTNL();
+
+       for (i = 0; i < BR_HASH_SIZE; i++) {
+               hlist_for_each_entry(fdb, &br->hash[i], hlist) {
+                       /* We only care for static entries */
+                       if (!fdb->is_static)
+                               continue;
+
+                       err = dev_uc_add(p->dev, fdb->addr.addr);
+                       if (err)
+                               goto rollback;
+               }
+       }
+       return 0;
+
+rollback:
+       for (i = 0; i < BR_HASH_SIZE; i++) {
+               hlist_for_each_entry(tmp, &br->hash[i], hlist) {
+                       /* If we reached the fdb that failed, we can stop */
+                       if (tmp == fdb)
+                               break;
+
+                       /* We only care for static entries */
+                       if (!tmp->is_static)
+                               continue;
+
+                       dev_uc_del(p->dev, tmp->addr.addr);
+               }
+       }
+       return err;
+}
+
+void br_fdb_unsync_static(struct net_bridge *br, struct net_bridge_port *p)
+{
+       struct net_bridge_fdb_entry *fdb;
+       int i;
+
+       ASSERT_RTNL();
+
+       for (i = 0; i < BR_HASH_SIZE; i++) {
+               hlist_for_each_entry_rcu(fdb, &br->hash[i], hlist) {
+                       /* We only care for static entries */
+                       if (!fdb->is_static)
+                               continue;
+
+                       dev_uc_del(p->dev, fdb->addr.addr);
+               }
+       }
+}
index 5262b8617eb9cc21b1070e48d1c1efda584aef6f..104a811dde571173973d7082c2b1886fa4a44647 100644 (file)
@@ -85,6 +85,110 @@ void br_port_carrier_check(struct net_bridge_port *p)
        spin_unlock_bh(&br->lock);
 }
 
+static void br_port_set_promisc(struct net_bridge_port *p)
+{
+       int err = 0;
+
+       if (br_promisc_port(p))
+               return;
+
+       err = dev_set_promiscuity(p->dev, 1);
+       if (err)
+               return;
+
+       br_fdb_unsync_static(p->br, p);
+       p->flags |= BR_PROMISC;
+}
+
+static void br_port_clear_promisc(struct net_bridge_port *p)
+{
+       int err;
+
+       /* Check if the port is already non-promisc or if it doesn't
+        * support UNICAST filtering.  Without unicast filtering support
+        * we'll end up re-enabling promisc mode anyway, so just check for
+        * it here.
+        */
+       if (!br_promisc_port(p) || !(p->dev->priv_flags & IFF_UNICAST_FLT))
+               return;
+
+       /* Since we'll be clearing the promisc mode, program the port
+        * first so that we don't have interruption in traffic.
+        */
+       err = br_fdb_sync_static(p->br, p);
+       if (err)
+               return;
+
+       dev_set_promiscuity(p->dev, -1);
+       p->flags &= ~BR_PROMISC;
+}
+
+/* When a port is added or removed or when certain port flags
+ * change, this function is called to automatically manage
+ * promiscuity setting of all the bridge ports.  We are always called
+ * under RTNL so can skip using rcu primitives.
+ */
+void br_manage_promisc(struct net_bridge *br)
+{
+       struct net_bridge_port *p;
+       bool set_all = false;
+
+       /* If vlan filtering is disabled or bridge interface is placed
+        * into promiscuous mode, place all ports in promiscuous mode.
+        */
+       if ((br->dev->flags & IFF_PROMISC) || !br_vlan_enabled(br))
+               set_all = true;
+
+       list_for_each_entry(p, &br->port_list, list) {
+               if (set_all) {
+                       br_port_set_promisc(p);
+               } else {
+                       /* If the number of auto-ports is <= 1, then all other
+                        * ports will have their output configuration
+                        * statically specified through fdbs.  Since ingress
+                        * on the auto-port becomes forwarding/egress to other
+                        * ports and egress configuration is statically known,
+                        * we can say that ingress configuration of the
+                        * auto-port is also statically known.
+                        * This lets us disable promiscuous mode and write
+                        * this config to hw.
+                        */
+                       if (br->auto_cnt <= br_auto_port(p))
+                               br_port_clear_promisc(p);
+                       else
+                               br_port_set_promisc(p);
+               }
+       }
+}
+
+static void nbp_update_port_count(struct net_bridge *br)
+{
+       struct net_bridge_port *p;
+       u32 cnt = 0;
+
+       list_for_each_entry(p, &br->port_list, list) {
+               if (br_auto_port(p))
+                       cnt++;
+       }
+       if (br->auto_cnt != cnt) {
+               br->auto_cnt = cnt;
+               br_manage_promisc(br);
+       }
+}
+
+static void nbp_delete_promisc(struct net_bridge_port *p)
+{
+       /* If port is currently promiscuous, unset promiscuity.
+        * Otherwise, it is a static port so remove all addresses
+        * from it.
+        */
+       dev_set_allmulti(p->dev, -1);
+       if (br_promisc_port(p))
+               dev_set_promiscuity(p->dev, -1);
+       else
+               br_fdb_unsync_static(p->br, p);
+}
+
 static void release_nbp(struct kobject *kobj)
 {
        struct net_bridge_port *p
@@ -133,7 +237,7 @@ static void del_nbp(struct net_bridge_port *p)
 
        sysfs_remove_link(br->ifobj, p->dev->name);
 
-       dev_set_promiscuity(dev, -1);
+       nbp_delete_promisc(p);
 
        spin_lock_bh(&br->lock);
        br_stp_disable_port(p);
@@ -141,10 +245,11 @@ static void del_nbp(struct net_bridge_port *p)
 
        br_ifinfo_notify(RTM_DELLINK, p);
 
+       list_del_rcu(&p->list);
+
        nbp_vlan_flush(p);
        br_fdb_delete_by_port(br, p, 1);
-
-       list_del_rcu(&p->list);
+       nbp_update_port_count(br);
 
        dev->priv_flags &= ~IFF_BRIDGE_PORT;
 
@@ -353,7 +458,7 @@ int br_add_if(struct net_bridge *br, struct net_device *dev)
 
        call_netdevice_notifiers(NETDEV_JOIN, dev);
 
-       err = dev_set_promiscuity(dev, 1);
+       err = dev_set_allmulti(dev, 1);
        if (err)
                goto put_back;
 
@@ -384,6 +489,8 @@ int br_add_if(struct net_bridge *br, struct net_device *dev)
 
        list_add_rcu(&p->list, &br->port_list);
 
+       nbp_update_port_count(br);
+
        netdev_update_features(br->dev);
 
        if (br->dev->needed_headroom < dev->needed_headroom)
@@ -455,3 +562,11 @@ int br_del_if(struct net_bridge *br, struct net_device *dev)
 
        return 0;
 }
+
+void br_port_flags_change(struct net_bridge_port *p, unsigned long mask)
+{
+       struct net_bridge *br = p->br;
+
+       if (mask & BR_AUTO_MASK)
+               nbp_update_port_count(br);
+}
index 80e1b0f60a30214002684a42b1bab1a02e9d9962..a615264cf01a950aafd894109d43f55cfd8dff91 100644 (file)
@@ -535,7 +535,7 @@ static struct net_device *brnf_get_logical_dev(struct sk_buff *skb, const struct
        if (brnf_pass_vlan_indev == 0 || !vlan_tx_tag_present(skb))
                return br;
 
-       vlan = __vlan_find_dev_deep(br, skb->vlan_proto,
+       vlan = __vlan_find_dev_deep_rcu(br, skb->vlan_proto,
                                    vlan_tx_tag_get(skb) & VLAN_VID_MASK);
 
        return vlan ? vlan : br;
@@ -859,12 +859,12 @@ static unsigned int br_nf_forward_arp(const struct nf_hook_ops *ops,
        return NF_STOLEN;
 }
 
-#if IS_ENABLED(CONFIG_NF_CONNTRACK_IPV4)
+#if IS_ENABLED(CONFIG_NF_DEFRAG_IPV4)
 static int br_nf_dev_queue_xmit(struct sk_buff *skb)
 {
        int ret;
 
-       if (skb->nfct != NULL && skb->protocol == htons(ETH_P_IP) &&
+       if (skb->protocol == htons(ETH_P_IP) &&
            skb->len + nf_bridge_mtu_reduction(skb) > skb->dev->mtu &&
            !skb_is_gso(skb)) {
                if (br_parse_ip_options(skb))
index e74b6d530cb6a3ab3dc650ee1900df06d3b3441a..26edb518b839b38240ab1de7cb619cd5ba924b6f 100644 (file)
@@ -328,6 +328,7 @@ static void br_set_port_flag(struct net_bridge_port *p, struct nlattr *tb[],
 static int br_setport(struct net_bridge_port *p, struct nlattr *tb[])
 {
        int err;
+       unsigned long old_flags = p->flags;
 
        br_set_port_flag(p, tb, IFLA_BRPORT_MODE, BR_HAIRPIN_MODE);
        br_set_port_flag(p, tb, IFLA_BRPORT_GUARD, BR_BPDU_GUARD);
@@ -353,6 +354,8 @@ static int br_setport(struct net_bridge_port *p, struct nlattr *tb[])
                if (err)
                        return err;
        }
+
+       br_port_flags_change(p, old_flags ^ p->flags);
        return 0;
 }
 
@@ -445,6 +448,20 @@ static int br_validate(struct nlattr *tb[], struct nlattr *data[])
        return 0;
 }
 
+static int br_dev_newlink(struct net *src_net, struct net_device *dev,
+                         struct nlattr *tb[], struct nlattr *data[])
+{
+       struct net_bridge *br = netdev_priv(dev);
+
+       if (tb[IFLA_ADDRESS]) {
+               spin_lock_bh(&br->lock);
+               br_stp_change_bridge_id(br, nla_data(tb[IFLA_ADDRESS]));
+               spin_unlock_bh(&br->lock);
+       }
+
+       return register_netdevice(dev);
+}
+
 static size_t br_get_link_af_size(const struct net_device *dev)
 {
        struct net_port_vlans *pv;
@@ -473,6 +490,7 @@ struct rtnl_link_ops br_link_ops __read_mostly = {
        .priv_size      = sizeof(struct net_bridge),
        .setup          = br_dev_setup,
        .validate       = br_validate,
+       .newlink        = br_dev_newlink,
        .dellink        = br_dev_delete,
 };
 
diff --git a/net/bridge/br_notify.c b/net/bridge/br_notify.c
deleted file mode 100644 (file)
index 2998dd1..0000000
+++ /dev/null
@@ -1,118 +0,0 @@
-/*
- *     Device event handling
- *     Linux ethernet bridge
- *
- *     Authors:
- *     Lennert Buytenhek               <buytenh@gnu.org>
- *
- *     This program is free software; you can redistribute it and/or
- *     modify it under the terms of the GNU General Public License
- *     as published by the Free Software Foundation; either version
- *     2 of the License, or (at your option) any later version.
- */
-
-#include <linux/kernel.h>
-#include <linux/rtnetlink.h>
-#include <net/net_namespace.h>
-
-#include "br_private.h"
-
-static int br_device_event(struct notifier_block *unused, unsigned long event, void *ptr);
-
-struct notifier_block br_device_notifier = {
-       .notifier_call = br_device_event
-};
-
-/*
- * Handle changes in state of network devices enslaved to a bridge.
- *
- * Note: don't care about up/down if bridge itself is down, because
- *     port state is checked when bridge is brought up.
- */
-static int br_device_event(struct notifier_block *unused, unsigned long event, void *ptr)
-{
-       struct net_device *dev = netdev_notifier_info_to_dev(ptr);
-       struct net_bridge_port *p;
-       struct net_bridge *br;
-       bool changed_addr;
-       int err;
-
-       /* register of bridge completed, add sysfs entries */
-       if ((dev->priv_flags & IFF_EBRIDGE) && event == NETDEV_REGISTER) {
-               br_sysfs_addbr(dev);
-               return NOTIFY_DONE;
-       }
-
-       /* not a port of a bridge */
-       p = br_port_get_rtnl(dev);
-       if (!p)
-               return NOTIFY_DONE;
-
-       br = p->br;
-
-       switch (event) {
-       case NETDEV_CHANGEMTU:
-               dev_set_mtu(br->dev, br_min_mtu(br));
-               break;
-
-       case NETDEV_CHANGEADDR:
-               spin_lock_bh(&br->lock);
-               br_fdb_changeaddr(p, dev->dev_addr);
-               changed_addr = br_stp_recalculate_bridge_id(br);
-               spin_unlock_bh(&br->lock);
-
-               if (changed_addr)
-                       call_netdevice_notifiers(NETDEV_CHANGEADDR, br->dev);
-
-               break;
-
-       case NETDEV_CHANGE:
-               br_port_carrier_check(p);
-               break;
-
-       case NETDEV_FEAT_CHANGE:
-               netdev_update_features(br->dev);
-               break;
-
-       case NETDEV_DOWN:
-               spin_lock_bh(&br->lock);
-               if (br->dev->flags & IFF_UP)
-                       br_stp_disable_port(p);
-               spin_unlock_bh(&br->lock);
-               break;
-
-       case NETDEV_UP:
-               if (netif_running(br->dev) && netif_oper_up(dev)) {
-                       spin_lock_bh(&br->lock);
-                       br_stp_enable_port(p);
-                       spin_unlock_bh(&br->lock);
-               }
-               break;
-
-       case NETDEV_UNREGISTER:
-               br_del_if(br, dev);
-               break;
-
-       case NETDEV_CHANGENAME:
-               err = br_sysfs_renameif(p);
-               if (err)
-                       return notifier_from_errno(err);
-               break;
-
-       case NETDEV_PRE_TYPE_CHANGE:
-               /* Forbid underlaying device to change its type. */
-               return NOTIFY_BAD;
-
-       case NETDEV_RESEND_IGMP:
-               /* Propagate to master device */
-               call_netdevice_notifiers(event, br->dev);
-               break;
-       }
-
-       /* Events that may cause spanning tree to refresh */
-       if (event == NETDEV_CHANGEADDR || event == NETDEV_UP ||
-           event == NETDEV_CHANGE || event == NETDEV_DOWN)
-               br_ifinfo_notify(RTM_NEWLINK, p);
-
-       return NOTIFY_DONE;
-}
index 06811d79f89f9e7712344d99fdc97194c62f0aef..53d6e32965fcd027dfc2b2b7f87baf88650c9f6c 100644 (file)
@@ -174,6 +174,8 @@ struct net_bridge_port
 #define BR_ADMIN_COST          0x00000010
 #define BR_LEARNING            0x00000020
 #define BR_FLOOD               0x00000040
+#define BR_AUTO_MASK (BR_FLOOD | BR_LEARNING)
+#define BR_PROMISC             0x00000080
 
 #ifdef CONFIG_BRIDGE_IGMP_SNOOPING
        struct bridge_mcast_query       ip4_query;
@@ -198,6 +200,9 @@ struct net_bridge_port
 #endif
 };
 
+#define br_auto_port(p) ((p)->flags & BR_AUTO_MASK)
+#define br_promisc_port(p) ((p)->flags & BR_PROMISC)
+
 #define br_port_exists(dev) (dev->priv_flags & IFF_BRIDGE_PORT)
 
 static inline struct net_bridge_port *br_port_get_rcu(const struct net_device *dev)
@@ -290,6 +295,7 @@ struct net_bridge
        struct timer_list               topology_change_timer;
        struct timer_list               gc_timer;
        struct kobject                  *ifobj;
+       u32                             auto_cnt;
 #ifdef CONFIG_BRIDGE_VLAN_FILTERING
        u8                              vlan_enabled;
        struct net_port_vlans __rcu     *vlan_info;
@@ -327,8 +333,6 @@ struct br_input_skb_cb {
 #define br_debug(br, format, args...)                  \
        pr_debug("%s: " format,  (br)->dev->name, ##args)
 
-extern struct notifier_block br_device_notifier;
-
 /* called under bridge lock */
 static inline int br_is_root_bridge(const struct net_bridge *br)
 {
@@ -395,6 +399,8 @@ int br_fdb_add(struct ndmsg *nlh, struct nlattr *tb[], struct net_device *dev,
               const unsigned char *addr, u16 nlh_flags);
 int br_fdb_dump(struct sk_buff *skb, struct netlink_callback *cb,
                struct net_device *dev, int idx);
+int br_fdb_sync_static(struct net_bridge *br, struct net_bridge_port *p);
+void br_fdb_unsync_static(struct net_bridge *br, struct net_bridge_port *p);
 
 /* br_forward.c */
 void br_deliver(const struct net_bridge_port *to, struct sk_buff *skb);
@@ -415,6 +421,8 @@ int br_del_if(struct net_bridge *br, struct net_device *dev);
 int br_min_mtu(const struct net_bridge *br);
 netdev_features_t br_features_recompute(struct net_bridge *br,
                                        netdev_features_t features);
+void br_port_flags_change(struct net_bridge_port *port, unsigned long mask);
+void br_manage_promisc(struct net_bridge *br);
 
 /* br_input.c */
 int br_handle_frame_finish(struct sk_buff *skb);
@@ -632,6 +640,10 @@ static inline u16 br_get_pvid(const struct net_port_vlans *v)
        return v->pvid ?: VLAN_N_VID;
 }
 
+static inline int br_vlan_enabled(struct net_bridge *br)
+{
+       return br->vlan_enabled;
+}
 #else
 static inline bool br_allowed_ingress(struct net_bridge *br,
                                      struct net_port_vlans *v,
@@ -712,6 +724,11 @@ static inline u16 br_get_pvid(const struct net_port_vlans *v)
 {
        return VLAN_N_VID;      /* Returns invalid vid */
 }
+
+static inline int br_vlan_enabled(struct net_bridge *br)
+{
+       return 0;
+}
 #endif
 
 /* br_netfilter.c */
index dd595bd7fa820444f6e8c424eb7dd4f307998af2..e561cd59b8a6ef0e764b3028d350b13954deac05 100644 (file)
@@ -41,20 +41,30 @@ static ssize_t show_##_name(struct net_bridge_port *p, char *buf) \
 }                                                              \
 static int store_##_name(struct net_bridge_port *p, unsigned long v) \
 {                                                              \
-       unsigned long flags = p->flags;                         \
-       if (v)                                                  \
-               flags |= _mask;                                 \
-       else                                                    \
-               flags &= ~_mask;                                \
-       if (flags != p->flags) {                                \
-               p->flags = flags;                               \
-               br_ifinfo_notify(RTM_NEWLINK, p);               \
-       }                                                       \
-       return 0;                                               \
+       return store_flag(p, v, _mask);                         \
 }                                                              \
 static BRPORT_ATTR(_name, S_IRUGO | S_IWUSR,                   \
                   show_##_name, store_##_name)
 
+static int store_flag(struct net_bridge_port *p, unsigned long v,
+                     unsigned long mask)
+{
+       unsigned long flags;
+
+       flags = p->flags;
+
+       if (v)
+               flags |= mask;
+       else
+               flags &= ~mask;
+
+       if (flags != p->flags) {
+               p->flags = flags;
+               br_port_flags_change(p, mask);
+               br_ifinfo_notify(RTM_NEWLINK, p);
+       }
+       return 0;
+}
 
 static ssize_t show_path_cost(struct net_bridge_port *p, char *buf)
 {
index 4a37161027899ab12d0f2dd5f4c5d69a1ed98716..24c5cc55589f128a9a8b630b5cc75ada3a27b6c3 100644 (file)
@@ -332,6 +332,7 @@ int br_vlan_filter_toggle(struct net_bridge *br, unsigned long val)
                goto unlock;
 
        br->vlan_enabled = val;
+       br_manage_promisc(br);
 
 unlock:
        rtnl_unlock();
index 5ca74a0e595fe5ce782985f6b2e17f185b5a52ec..3baf29d34e62fb09a02e9028bea402a94874c3b3 100644 (file)
@@ -2,13 +2,25 @@
 # Bridge netfilter configuration
 #
 #
-config NF_TABLES_BRIDGE
+menuconfig NF_TABLES_BRIDGE
        depends on NF_TABLES
+       select BRIDGE_NETFILTER
        tristate "Ethernet Bridge nf_tables support"
 
+if NF_TABLES_BRIDGE
+
+config NFT_BRIDGE_META
+       tristate "Netfilter nf_table bridge meta support"
+       depends on NFT_META
+       help
+         Add support for bridge dedicated meta key.
+
+endif # NF_TABLES_BRIDGE
+
 menuconfig BRIDGE_NF_EBTABLES
        tristate "Ethernet Bridge tables (ebtables) support"
        depends on BRIDGE && NETFILTER
+       select BRIDGE_NETFILTER
        select NETFILTER_XTABLES
        help
          ebtables is a general, extensible frame/packet identification
index ea7629f58b3d1c44e28524df8a0937de3a18546b..6f2f3943d66f34b43c72be21b603bbf51ba0d289 100644 (file)
@@ -3,6 +3,7 @@
 #
 
 obj-$(CONFIG_NF_TABLES_BRIDGE) += nf_tables_bridge.o
+obj-$(CONFIG_NFT_BRIDGE_META)  += nft_meta_bridge.o
 
 obj-$(CONFIG_BRIDGE_NF_EBTABLES) += ebtables.o
 
diff --git a/net/bridge/netfilter/nft_meta_bridge.c b/net/bridge/netfilter/nft_meta_bridge.c
new file mode 100644 (file)
index 0000000..4f02109
--- /dev/null
@@ -0,0 +1,139 @@
+/*
+ * Copyright (c) 2014 Intel Corporation
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ */
+
+#include <linux/kernel.h>
+#include <linux/init.h>
+#include <linux/module.h>
+#include <linux/netlink.h>
+#include <linux/netfilter.h>
+#include <linux/netfilter/nf_tables.h>
+#include <net/netfilter/nf_tables.h>
+#include <net/netfilter/nft_meta.h>
+
+#include "../br_private.h"
+
+static void nft_meta_bridge_get_eval(const struct nft_expr *expr,
+                                    struct nft_data data[NFT_REG_MAX + 1],
+                                    const struct nft_pktinfo *pkt)
+{
+       const struct nft_meta *priv = nft_expr_priv(expr);
+       const struct net_device *in = pkt->in, *out = pkt->out;
+       struct nft_data *dest = &data[priv->dreg];
+       const struct net_bridge_port *p;
+
+       switch (priv->key) {
+       case NFT_META_BRI_IIFNAME:
+               if (in == NULL || (p = br_port_get_rcu(in)) == NULL)
+                       goto err;
+               break;
+       case NFT_META_BRI_OIFNAME:
+               if (out == NULL || (p = br_port_get_rcu(out)) == NULL)
+                       goto err;
+               break;
+       default:
+               goto out;
+       }
+
+       strncpy((char *)dest->data, p->br->dev->name, sizeof(dest->data));
+       return;
+out:
+       return nft_meta_get_eval(expr, data, pkt);
+err:
+       data[NFT_REG_VERDICT].verdict = NFT_BREAK;
+}
+
+static int nft_meta_bridge_get_init(const struct nft_ctx *ctx,
+                                   const struct nft_expr *expr,
+                                   const struct nlattr * const tb[])
+{
+       struct nft_meta *priv = nft_expr_priv(expr);
+       int err;
+
+       priv->key = ntohl(nla_get_be32(tb[NFTA_META_KEY]));
+       switch (priv->key) {
+       case NFT_META_BRI_IIFNAME:
+       case NFT_META_BRI_OIFNAME:
+               break;
+       default:
+               return nft_meta_get_init(ctx, expr, tb);
+       }
+
+       priv->dreg = ntohl(nla_get_be32(tb[NFTA_META_DREG]));
+       err = nft_validate_output_register(priv->dreg);
+       if (err < 0)
+               return err;
+
+       err = nft_validate_data_load(ctx, priv->dreg, NULL, NFT_DATA_VALUE);
+       if (err < 0)
+               return err;
+
+       return 0;
+}
+
+static struct nft_expr_type nft_meta_bridge_type;
+static const struct nft_expr_ops nft_meta_bridge_get_ops = {
+       .type           = &nft_meta_bridge_type,
+       .size           = NFT_EXPR_SIZE(sizeof(struct nft_meta)),
+       .eval           = nft_meta_bridge_get_eval,
+       .init           = nft_meta_bridge_get_init,
+       .dump           = nft_meta_get_dump,
+};
+
+static const struct nft_expr_ops nft_meta_bridge_set_ops = {
+       .type           = &nft_meta_bridge_type,
+       .size           = NFT_EXPR_SIZE(sizeof(struct nft_meta)),
+       .eval           = nft_meta_set_eval,
+       .init           = nft_meta_set_init,
+       .dump           = nft_meta_set_dump,
+};
+
+static const struct nft_expr_ops *
+nft_meta_bridge_select_ops(const struct nft_ctx *ctx,
+                          const struct nlattr * const tb[])
+{
+       if (tb[NFTA_META_KEY] == NULL)
+               return ERR_PTR(-EINVAL);
+
+       if (tb[NFTA_META_DREG] && tb[NFTA_META_SREG])
+               return ERR_PTR(-EINVAL);
+
+       if (tb[NFTA_META_DREG])
+               return &nft_meta_bridge_get_ops;
+
+       if (tb[NFTA_META_SREG])
+               return &nft_meta_bridge_set_ops;
+
+       return ERR_PTR(-EINVAL);
+}
+
+static struct nft_expr_type nft_meta_bridge_type __read_mostly = {
+       .family         = NFPROTO_BRIDGE,
+       .name           = "meta",
+       .select_ops     = &nft_meta_bridge_select_ops,
+       .policy         = nft_meta_policy,
+       .maxattr        = NFTA_META_MAX,
+       .owner          = THIS_MODULE,
+};
+
+static int __init nft_meta_bridge_module_init(void)
+{
+       return nft_register_expr(&nft_meta_bridge_type);
+}
+
+static void __exit nft_meta_bridge_module_exit(void)
+{
+       nft_unregister_expr(&nft_meta_bridge_type);
+}
+
+module_init(nft_meta_bridge_module_init);
+module_exit(nft_meta_bridge_module_exit);
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Tomasz Bursztyka <tomasz.bursztyka@linux.intel.com>");
+MODULE_ALIAS_NFT_AF_EXPR(AF_BRIDGE, "meta");
index a27f8aad9e991f95cc5366bce3e975bff4f16bdd..ce82337521f665c5847819402d8a9c167452fb90 100644 (file)
@@ -337,6 +337,29 @@ static struct dev_rcv_lists *find_dev_rcv_lists(struct net_device *dev)
                return (struct dev_rcv_lists *)dev->ml_priv;
 }
 
+/**
+ * effhash - hash function for 29 bit CAN identifier reduction
+ * @can_id: 29 bit CAN identifier
+ *
+ * Description:
+ *  To reduce the linear traversal in one linked list of _single_ EFF CAN
+ *  frame subscriptions the 29 bit identifier is mapped to 10 bits.
+ *  (see CAN_EFF_RCV_HASH_BITS definition)
+ *
+ * Return:
+ *  Hash value from 0x000 - 0x3FF ( enforced by CAN_EFF_RCV_HASH_BITS mask )
+ */
+static unsigned int effhash(canid_t can_id)
+{
+       unsigned int hash;
+
+       hash = can_id;
+       hash ^= can_id >> CAN_EFF_RCV_HASH_BITS;
+       hash ^= can_id >> (2 * CAN_EFF_RCV_HASH_BITS);
+
+       return hash & ((1 << CAN_EFF_RCV_HASH_BITS) - 1);
+}
+
 /**
  * find_rcv_list - determine optimal filterlist inside device filter struct
  * @can_id: pointer to CAN identifier of a given can_filter
@@ -400,10 +423,8 @@ static struct hlist_head *find_rcv_list(canid_t *can_id, canid_t *mask,
            !(*can_id & CAN_RTR_FLAG)) {
 
                if (*can_id & CAN_EFF_FLAG) {
-                       if (*mask == (CAN_EFF_MASK | CAN_EFF_RTR_FLAGS)) {
-                               /* RFC: a future use-case for hash-tables? */
-                               return &d->rx[RX_EFF];
-                       }
+                       if (*mask == (CAN_EFF_MASK | CAN_EFF_RTR_FLAGS))
+                               return &d->rx_eff[effhash(*can_id)];
                } else {
                        if (*mask == (CAN_SFF_MASK | CAN_EFF_RTR_FLAGS))
                                return &d->rx_sff[*can_id];
@@ -632,7 +653,7 @@ static int can_rcv_filter(struct dev_rcv_lists *d, struct sk_buff *skb)
                return matches;
 
        if (can_id & CAN_EFF_FLAG) {
-               hlist_for_each_entry_rcu(r, &d->rx[RX_EFF], list) {
+               hlist_for_each_entry_rcu(r, &d->rx_eff[effhash(can_id)], list) {
                        if (r->can_id == can_id) {
                                deliver(skb, r);
                                matches++;
index 6de58b40535cc309e59f1a61ed624c930b7c7ffe..fca0fe9fc45a497cdf3da82d5414e846e7cc61b7 100644 (file)
@@ -59,12 +59,17 @@ struct receiver {
        char *ident;
 };
 
-enum { RX_ERR, RX_ALL, RX_FIL, RX_INV, RX_EFF, RX_MAX };
+#define CAN_SFF_RCV_ARRAY_SZ (1 << CAN_SFF_ID_BITS)
+#define CAN_EFF_RCV_HASH_BITS 10
+#define CAN_EFF_RCV_ARRAY_SZ (1 << CAN_EFF_RCV_HASH_BITS)
+
+enum { RX_ERR, RX_ALL, RX_FIL, RX_INV, RX_MAX };
 
 /* per device receive filters linked at dev->ml_priv */
 struct dev_rcv_lists {
        struct hlist_head rx[RX_MAX];
-       struct hlist_head rx_sff[0x800];
+       struct hlist_head rx_sff[CAN_SFF_RCV_ARRAY_SZ];
+       struct hlist_head rx_eff[CAN_EFF_RCV_ARRAY_SZ];
        int remove_on_zero_entries;
        int entries;
 };
index ac31891967da1ed811247d131dffbabfd7d28d11..050a2110d43f6b78f331b599569eaaf2d8803c24 100644 (file)
@@ -804,7 +804,7 @@ static int cgw_create_job(struct sk_buff *skb,  struct nlmsghdr *nlh)
        u8 limhops = 0;
        int err = 0;
 
-       if (!capable(CAP_NET_ADMIN))
+       if (!netlink_capable(skb, CAP_NET_ADMIN))
                return -EPERM;
 
        if (nlmsg_len(nlh) < sizeof(*r))
@@ -893,7 +893,7 @@ static int cgw_remove_job(struct sk_buff *skb, struct nlmsghdr *nlh)
        u8 limhops = 0;
        int err = 0;
 
-       if (!capable(CAP_NET_ADMIN))
+       if (!netlink_capable(skb, CAP_NET_ADMIN))
                return -EPERM;
 
        if (nlmsg_len(nlh) < sizeof(*r))
index b543470c8f8b5ef7e5196b7f895cf30a9b0db90e..1a19b985a8685b0aff4450acda4a8d4f429568bc 100644 (file)
@@ -80,7 +80,6 @@ static const char rx_list_name[][8] = {
        [RX_ALL] = "rx_all",
        [RX_FIL] = "rx_fil",
        [RX_INV] = "rx_inv",
-       [RX_EFF] = "rx_eff",
 };
 
 /*
@@ -389,25 +388,26 @@ static const struct file_operations can_rcvlist_proc_fops = {
        .release        = single_release,
 };
 
-static inline void can_rcvlist_sff_proc_show_one(struct seq_file *m,
-                                                struct net_device *dev,
-                                                struct dev_rcv_lists *d)
+static inline void can_rcvlist_proc_show_array(struct seq_file *m,
+                                              struct net_device *dev,
+                                              struct hlist_head *rcv_array,
+                                              unsigned int rcv_array_sz)
 {
-       int i;
+       unsigned int i;
        int all_empty = 1;
 
        /* check whether at least one list is non-empty */
-       for (i = 0; i < 0x800; i++)
-               if (!hlist_empty(&d->rx_sff[i])) {
+       for (i = 0; i < rcv_array_sz; i++)
+               if (!hlist_empty(&rcv_array[i])) {
                        all_empty = 0;
                        break;
                }
 
        if (!all_empty) {
                can_print_recv_banner(m);
-               for (i = 0; i < 0x800; i++) {
-                       if (!hlist_empty(&d->rx_sff[i]))
-                               can_print_rcvlist(m, &d->rx_sff[i], dev);
+               for (i = 0; i < rcv_array_sz; i++) {
+                       if (!hlist_empty(&rcv_array[i]))
+                               can_print_rcvlist(m, &rcv_array[i], dev);
                }
        } else
                seq_printf(m, "  (%s: no entry)\n", DNAME(dev));
@@ -425,12 +425,15 @@ static int can_rcvlist_sff_proc_show(struct seq_file *m, void *v)
 
        /* sff receive list for 'all' CAN devices (dev == NULL) */
        d = &can_rx_alldev_list;
-       can_rcvlist_sff_proc_show_one(m, NULL, d);
+       can_rcvlist_proc_show_array(m, NULL, d->rx_sff, ARRAY_SIZE(d->rx_sff));
 
        /* sff receive list for registered CAN devices */
        for_each_netdev_rcu(&init_net, dev) {
-               if (dev->type == ARPHRD_CAN && dev->ml_priv)
-                       can_rcvlist_sff_proc_show_one(m, dev, dev->ml_priv);
+               if (dev->type == ARPHRD_CAN && dev->ml_priv) {
+                       d = dev->ml_priv;
+                       can_rcvlist_proc_show_array(m, dev, d->rx_sff,
+                                                   ARRAY_SIZE(d->rx_sff));
+               }
        }
 
        rcu_read_unlock();
@@ -452,6 +455,49 @@ static const struct file_operations can_rcvlist_sff_proc_fops = {
        .release        = single_release,
 };
 
+
+static int can_rcvlist_eff_proc_show(struct seq_file *m, void *v)
+{
+       struct net_device *dev;
+       struct dev_rcv_lists *d;
+
+       /* RX_EFF */
+       seq_puts(m, "\nreceive list 'rx_eff':\n");
+
+       rcu_read_lock();
+
+       /* eff receive list for 'all' CAN devices (dev == NULL) */
+       d = &can_rx_alldev_list;
+       can_rcvlist_proc_show_array(m, NULL, d->rx_eff, ARRAY_SIZE(d->rx_eff));
+
+       /* eff receive list for registered CAN devices */
+       for_each_netdev_rcu(&init_net, dev) {
+               if (dev->type == ARPHRD_CAN && dev->ml_priv) {
+                       d = dev->ml_priv;
+                       can_rcvlist_proc_show_array(m, dev, d->rx_eff,
+                                                   ARRAY_SIZE(d->rx_eff));
+               }
+       }
+
+       rcu_read_unlock();
+
+       seq_putc(m, '\n');
+       return 0;
+}
+
+static int can_rcvlist_eff_proc_open(struct inode *inode, struct file *file)
+{
+       return single_open(file, can_rcvlist_eff_proc_show, NULL);
+}
+
+static const struct file_operations can_rcvlist_eff_proc_fops = {
+       .owner          = THIS_MODULE,
+       .open           = can_rcvlist_eff_proc_open,
+       .read           = seq_read,
+       .llseek         = seq_lseek,
+       .release        = single_release,
+};
+
 /*
  * proc utility functions
  */
@@ -491,8 +537,8 @@ void can_init_proc(void)
                                           &can_rcvlist_proc_fops, (void *)RX_FIL);
        pde_rcvlist_inv = proc_create_data(CAN_PROC_RCVLIST_INV, 0644, can_dir,
                                           &can_rcvlist_proc_fops, (void *)RX_INV);
-       pde_rcvlist_eff = proc_create_data(CAN_PROC_RCVLIST_EFF, 0644, can_dir,
-                                          &can_rcvlist_proc_fops, (void *)RX_EFF);
+       pde_rcvlist_eff = proc_create(CAN_PROC_RCVLIST_EFF, 0644, can_dir,
+                                     &can_rcvlist_eff_proc_fops);
        pde_rcvlist_sff = proc_create(CAN_PROC_RCVLIST_SFF, 0644, can_dir,
                                      &can_rcvlist_sff_proc_fops);
 }
index e632b5a52f5b89cb2e275b64494905cc7ebfc8e7..8b8a5a24b223ef268c28cf5e5ac5379314bba237 100644 (file)
@@ -1548,8 +1548,10 @@ static void apply_primary_affinity(struct ceph_osdmap *osdmap, u32 pps,
                return;
 
        for (i = 0; i < len; i++) {
-               if (osds[i] != CRUSH_ITEM_NONE &&
-                   osdmap->osd_primary_affinity[i] !=
+               int osd = osds[i];
+
+               if (osd != CRUSH_ITEM_NONE &&
+                   osdmap->osd_primary_affinity[osd] !=
                                        CEPH_OSD_DEFAULT_PRIMARY_AFFINITY) {
                        break;
                }
@@ -1563,10 +1565,9 @@ static void apply_primary_affinity(struct ceph_osdmap *osdmap, u32 pps,
         * osd's pgs get rejected as primary.
         */
        for (i = 0; i < len; i++) {
-               int osd;
+               int osd = osds[i];
                u32 aff;
 
-               osd = osds[i];
                if (osd == CRUSH_ITEM_NONE)
                        continue;
 
index 826b925aa4530a0de280b7b01442beb191cf8b5c..71093d94ad2bb22e01e09676c482abd3d8d37ab5 100644 (file)
@@ -9,7 +9,7 @@ obj-$(CONFIG_SYSCTL) += sysctl_net_core.o
 
 obj-y               += dev.o ethtool.o dev_addr_lists.o dst.o netevent.o \
                        neighbour.o rtnetlink.o utils.o link_watch.o filter.o \
-                       sock_diag.o dev_ioctl.o
+                       sock_diag.o dev_ioctl.o tso.o
 
 obj-$(CONFIG_XFRM) += flow.o
 obj-y += net-sysfs.o
index 11d70e3afefa467ceb64fccd3fad436b6abb9189..867adb25b5b896e74e60ee68bef8a450a30d554d 100644 (file)
@@ -2424,7 +2424,7 @@ EXPORT_SYMBOL(netdev_rx_csum_fault);
  * 2. No high memory really exists on this machine.
  */
 
-static int illegal_highdma(const struct net_device *dev, struct sk_buff *skb)
+static int illegal_highdma(struct net_device *dev, struct sk_buff *skb)
 {
 #ifdef CONFIG_HIGHMEM
        int i;
@@ -2499,38 +2499,36 @@ static int dev_gso_segment(struct sk_buff *skb, netdev_features_t features)
 }
 
 static netdev_features_t harmonize_features(struct sk_buff *skb,
-                                           const struct net_device *dev,
-                                           netdev_features_t features)
+       netdev_features_t features)
 {
        int tmp;
 
        if (skb->ip_summed != CHECKSUM_NONE &&
            !can_checksum_protocol(features, skb_network_protocol(skb, &tmp))) {
                features &= ~NETIF_F_ALL_CSUM;
-       } else if (illegal_highdma(dev, skb)) {
+       } else if (illegal_highdma(skb->dev, skb)) {
                features &= ~NETIF_F_SG;
        }
 
        return features;
 }
 
-netdev_features_t netif_skb_dev_features(struct sk_buff *skb,
-                                        const struct net_device *dev)
+netdev_features_t netif_skb_features(struct sk_buff *skb)
 {
        __be16 protocol = skb->protocol;
-       netdev_features_t features = dev->features;
+       netdev_features_t features = skb->dev->features;
 
-       if (skb_shinfo(skb)->gso_segs > dev->gso_max_segs)
+       if (skb_shinfo(skb)->gso_segs > skb->dev->gso_max_segs)
                features &= ~NETIF_F_GSO_MASK;
 
        if (protocol == htons(ETH_P_8021Q) || protocol == htons(ETH_P_8021AD)) {
                struct vlan_ethhdr *veh = (struct vlan_ethhdr *)skb->data;
                protocol = veh->h_vlan_encapsulated_proto;
        } else if (!vlan_tx_tag_present(skb)) {
-               return harmonize_features(skb, dev, features);
+               return harmonize_features(skb, features);
        }
 
-       features &= (dev->vlan_features | NETIF_F_HW_VLAN_CTAG_TX |
+       features &= (skb->dev->vlan_features | NETIF_F_HW_VLAN_CTAG_TX |
                                               NETIF_F_HW_VLAN_STAG_TX);
 
        if (protocol == htons(ETH_P_8021Q) || protocol == htons(ETH_P_8021AD))
@@ -2538,9 +2536,9 @@ netdev_features_t netif_skb_dev_features(struct sk_buff *skb,
                                NETIF_F_GEN_CSUM | NETIF_F_HW_VLAN_CTAG_TX |
                                NETIF_F_HW_VLAN_STAG_TX;
 
-       return harmonize_features(skb, dev, features);
+       return harmonize_features(skb, features);
 }
-EXPORT_SYMBOL(netif_skb_dev_features);
+EXPORT_SYMBOL(netif_skb_features);
 
 int dev_hard_start_xmit(struct sk_buff *skb, struct net_device *dev,
                        struct netdev_queue *txq)
@@ -5606,10 +5604,6 @@ static void rollback_registered_many(struct list_head *head)
                */
                call_netdevice_notifiers(NETDEV_UNREGISTER, dev);
 
-               if (!dev->rtnl_link_ops ||
-                   dev->rtnl_link_state == RTNL_LINK_INITIALIZED)
-                       rtmsg_ifinfo(RTM_DELLINK, dev, ~0U, GFP_KERNEL);
-
                /*
                 *      Flush the unicast and multicast chains
                 */
@@ -5619,6 +5613,10 @@ static void rollback_registered_many(struct list_head *head)
                if (dev->netdev_ops->ndo_uninit)
                        dev->netdev_ops->ndo_uninit(dev);
 
+               if (!dev->rtnl_link_ops ||
+                   dev->rtnl_link_state == RTNL_LINK_INITIALIZED)
+                       rtmsg_ifinfo(RTM_DELLINK, dev, ~0U, GFP_KERNEL);
+
                /* Notifier chain MUST detach us all upper devices. */
                WARN_ON(netdev_has_any_upper_dev(dev));
 
index 640ba0e5831ce0334a9cbf03983c16a022e85065..aa8978ac47d28b8588ff78e02e9607b1d616d6d1 100644 (file)
@@ -557,6 +557,25 @@ err_out:
        return ret;
 }
 
+static int ethtool_copy_validate_indir(u32 *indir, void __user *useraddr,
+                                       struct ethtool_rxnfc *rx_rings,
+                                       u32 size)
+{
+       int ret = 0, i;
+
+       if (copy_from_user(indir, useraddr, size * sizeof(indir[0])))
+               ret = -EFAULT;
+
+       /* Validate ring indices */
+       for (i = 0; i < size; i++) {
+               if (indir[i] >= rx_rings->data) {
+                       ret = -EINVAL;
+                       break;
+               }
+       }
+       return ret;
+}
+
 static noinline_for_stack int ethtool_get_rxfh_indir(struct net_device *dev,
                                                     void __user *useraddr)
 {
@@ -613,6 +632,7 @@ static noinline_for_stack int ethtool_set_rxfh_indir(struct net_device *dev,
        u32 *indir;
        const struct ethtool_ops *ops = dev->ethtool_ops;
        int ret;
+       u32 ringidx_offset = offsetof(struct ethtool_rxfh_indir, ring_index[0]);
 
        if (!ops->get_rxfh_indir_size || !ops->set_rxfh_indir ||
            !ops->get_rxnfc)
@@ -643,28 +663,196 @@ static noinline_for_stack int ethtool_set_rxfh_indir(struct net_device *dev,
                for (i = 0; i < dev_size; i++)
                        indir[i] = ethtool_rxfh_indir_default(i, rx_rings.data);
        } else {
-               if (copy_from_user(indir,
-                                 useraddr +
-                                 offsetof(struct ethtool_rxfh_indir,
-                                          ring_index[0]),
-                                 dev_size * sizeof(indir[0]))) {
+               ret = ethtool_copy_validate_indir(indir,
+                                                 useraddr + ringidx_offset,
+                                                 &rx_rings,
+                                                 dev_size);
+               if (ret)
+                       goto out;
+       }
+
+       ret = ops->set_rxfh_indir(dev, indir);
+
+out:
+       kfree(indir);
+       return ret;
+}
+
+static noinline_for_stack int ethtool_get_rxfh(struct net_device *dev,
+                                              void __user *useraddr)
+{
+       int ret;
+       const struct ethtool_ops *ops = dev->ethtool_ops;
+       u32 user_indir_size = 0, user_key_size = 0;
+       u32 dev_indir_size = 0, dev_key_size = 0;
+       u32 total_size;
+       u32 indir_offset, indir_bytes;
+       u32 key_offset;
+       u32 *indir = NULL;
+       u8 *hkey = NULL;
+       u8 *rss_config;
+
+       if (!(dev->ethtool_ops->get_rxfh_indir_size ||
+             dev->ethtool_ops->get_rxfh_key_size) ||
+             !dev->ethtool_ops->get_rxfh)
+               return -EOPNOTSUPP;
+
+       if (ops->get_rxfh_indir_size)
+               dev_indir_size = ops->get_rxfh_indir_size(dev);
+
+       indir_offset = offsetof(struct ethtool_rxfh, indir_size);
+
+       if (copy_from_user(&user_indir_size,
+                          useraddr + indir_offset,
+                          sizeof(user_indir_size)))
+               return -EFAULT;
+
+       if (copy_to_user(useraddr + indir_offset,
+                        &dev_indir_size, sizeof(dev_indir_size)))
+               return -EFAULT;
+
+       if (ops->get_rxfh_key_size)
+               dev_key_size = ops->get_rxfh_key_size(dev);
+
+       if ((dev_key_size + dev_indir_size) == 0)
+               return -EOPNOTSUPP;
+
+       key_offset = offsetof(struct ethtool_rxfh, key_size);
+
+       if (copy_from_user(&user_key_size,
+                          useraddr + key_offset,
+                          sizeof(user_key_size)))
+               return -EFAULT;
+
+       if (copy_to_user(useraddr + key_offset,
+                        &dev_key_size, sizeof(dev_key_size)))
+               return -EFAULT;
+
+       /* If the user buffer size is 0, this is just a query for the
+        * device table size and key size.  Otherwise, if the user size is
+        * not equal to device table size or key size it's an error.
+        */
+       if (!user_indir_size && !user_key_size)
+               return 0;
+
+       if ((user_indir_size && (user_indir_size != dev_indir_size)) ||
+           (user_key_size && (user_key_size != dev_key_size)))
+               return -EINVAL;
+
+       indir_bytes = user_indir_size * sizeof(indir[0]);
+       total_size = indir_bytes + user_key_size;
+       rss_config = kzalloc(total_size, GFP_USER);
+       if (!rss_config)
+               return -ENOMEM;
+
+       if (user_indir_size)
+               indir = (u32 *)rss_config;
+
+       if (user_key_size)
+               hkey = rss_config + indir_bytes;
+
+       ret = dev->ethtool_ops->get_rxfh(dev, indir, hkey);
+       if (!ret) {
+               if (copy_to_user(useraddr +
+                                offsetof(struct ethtool_rxfh, rss_config[0]),
+                                rss_config, total_size))
                        ret = -EFAULT;
+       }
+
+       kfree(rss_config);
+
+       return ret;
+}
+
+static noinline_for_stack int ethtool_set_rxfh(struct net_device *dev,
+                                              void __user *useraddr)
+{
+       int ret;
+       const struct ethtool_ops *ops = dev->ethtool_ops;
+       struct ethtool_rxnfc rx_rings;
+       u32 user_indir_size = 0, dev_indir_size = 0, i;
+       u32 user_key_size = 0, dev_key_size = 0;
+       u32 *indir = NULL, indir_bytes = 0;
+       u8 *hkey = NULL;
+       u8 *rss_config;
+       u32 indir_offset, key_offset;
+       u32 rss_cfg_offset = offsetof(struct ethtool_rxfh, rss_config[0]);
+
+       if (!(ops->get_rxfh_indir_size || ops->get_rxfh_key_size) ||
+           !ops->get_rxnfc || !ops->set_rxfh)
+               return -EOPNOTSUPP;
+
+       if (ops->get_rxfh_indir_size)
+               dev_indir_size = ops->get_rxfh_indir_size(dev);
+
+       indir_offset = offsetof(struct ethtool_rxfh, indir_size);
+       if (copy_from_user(&user_indir_size,
+                          useraddr + indir_offset,
+                          sizeof(user_indir_size)))
+               return -EFAULT;
+
+       if (ops->get_rxfh_key_size)
+               dev_key_size = dev->ethtool_ops->get_rxfh_key_size(dev);
+
+       if ((dev_key_size + dev_indir_size) == 0)
+               return -EOPNOTSUPP;
+
+       key_offset = offsetof(struct ethtool_rxfh, key_size);
+       if (copy_from_user(&user_key_size,
+                          useraddr + key_offset,
+                          sizeof(user_key_size)))
+               return -EFAULT;
+
+       /* If either indir or hash key is valid, proceed further.
+        */
+       if ((user_indir_size && ((user_indir_size != 0xDEADBEEF) &&
+                                user_indir_size != dev_indir_size)) ||
+           (user_key_size && (user_key_size != dev_key_size)))
+               return -EINVAL;
+
+       if (user_indir_size != 0xDEADBEEF)
+               indir_bytes = dev_indir_size * sizeof(indir[0]);
+
+       rss_config = kzalloc(indir_bytes + user_key_size, GFP_USER);
+       if (!rss_config)
+               return -ENOMEM;
+
+       rx_rings.cmd = ETHTOOL_GRXRINGS;
+       ret = ops->get_rxnfc(dev, &rx_rings, NULL);
+       if (ret)
+               goto out;
+
+       /* user_indir_size == 0 means reset the indir table to default.
+        * user_indir_size == 0xDEADBEEF means indir setting is not requested.
+        */
+       if (user_indir_size && user_indir_size != 0xDEADBEEF) {
+               indir = (u32 *)rss_config;
+               ret = ethtool_copy_validate_indir(indir,
+                                                 useraddr + rss_cfg_offset,
+                                                 &rx_rings,
+                                                 user_indir_size);
+               if (ret)
                        goto out;
-               }
+       } else if (user_indir_size == 0) {
+               indir = (u32 *)rss_config;
+               for (i = 0; i < dev_indir_size; i++)
+                       indir[i] = ethtool_rxfh_indir_default(i, rx_rings.data);
+       }
 
-               /* Validate ring indices */
-               for (i = 0; i < dev_size; i++) {
-                       if (indir[i] >= rx_rings.data) {
-                               ret = -EINVAL;
-                               goto out;
-                       }
+       if (user_key_size) {
+               hkey = rss_config + indir_bytes;
+               if (copy_from_user(hkey,
+                                  useraddr + rss_cfg_offset + indir_bytes,
+                                  user_key_size)) {
+                       ret = -EFAULT;
+                       goto out;
                }
        }
 
-       ret = ops->set_rxfh_indir(dev, indir);
+       ret = ops->set_rxfh(dev, indir, hkey);
 
 out:
-       kfree(indir);
+       kfree(rss_config);
        return ret;
 }
 
@@ -1491,6 +1679,7 @@ int dev_ethtool(struct net *net, struct ifreq *ifr)
        case ETHTOOL_GRXCLSRULE:
        case ETHTOOL_GRXCLSRLALL:
        case ETHTOOL_GRXFHINDIR:
+       case ETHTOOL_GRSSH:
        case ETHTOOL_GFEATURES:
        case ETHTOOL_GCHANNELS:
        case ETHTOOL_GET_TS_INFO:
@@ -1628,6 +1817,12 @@ int dev_ethtool(struct net *net, struct ifreq *ifr)
        case ETHTOOL_SRXFHINDIR:
                rc = ethtool_set_rxfh_indir(dev, useraddr);
                break;
+       case ETHTOOL_GRSSH:
+               rc = ethtool_get_rxfh(dev, useraddr);
+               break;
+       case ETHTOOL_SRSSH:
+               rc = ethtool_set_rxfh(dev, useraddr);
+               break;
        case ETHTOOL_GFEATURES:
                rc = ethtool_get_features(dev, useraddr);
                break;
index cd58614660cf54e1431392c5045bbdc9478336e6..7067cb240d3e266d4b93b9df2e480232a5f74361 100644 (file)
 #include <linux/seccomp.h>
 #include <linux/if_vlan.h>
 
+/* Registers */
+#define BPF_R0 regs[BPF_REG_0]
+#define BPF_R1 regs[BPF_REG_1]
+#define BPF_R2 regs[BPF_REG_2]
+#define BPF_R3 regs[BPF_REG_3]
+#define BPF_R4 regs[BPF_REG_4]
+#define BPF_R5 regs[BPF_REG_5]
+#define BPF_R6 regs[BPF_REG_6]
+#define BPF_R7 regs[BPF_REG_7]
+#define BPF_R8 regs[BPF_REG_8]
+#define BPF_R9 regs[BPF_REG_9]
+#define BPF_R10        regs[BPF_REG_10]
+
+/* Named registers */
+#define A      regs[insn->a_reg]
+#define X      regs[insn->x_reg]
+#define FP     regs[BPF_REG_FP]
+#define ARG1   regs[BPF_REG_ARG1]
+#define CTX    regs[BPF_REG_CTX]
+#define K      insn->imm
+
 /* No hurry in this branch
  *
  * Exported for the bpf jit load helper.
@@ -57,9 +78,9 @@ void *bpf_internal_load_pointer_neg_helper(const struct sk_buff *skb, int k, uns
                ptr = skb_network_header(skb) + k - SKF_NET_OFF;
        else if (k >= SKF_LL_OFF)
                ptr = skb_mac_header(skb) + k - SKF_LL_OFF;
-
        if (ptr >= skb->head && ptr + size <= skb_tail_pointer(skb))
                return ptr;
+
        return NULL;
 }
 
@@ -68,6 +89,7 @@ static inline void *load_pointer(const struct sk_buff *skb, int k,
 {
        if (k >= 0)
                return skb_header_pointer(skb, k, size, buffer);
+
        return bpf_internal_load_pointer_neg_helper(skb, k, size);
 }
 
@@ -131,210 +153,208 @@ noinline u64 __bpf_call_base(u64 r1, u64 r2, u64 r3, u64 r4, u64 r5)
  * keep, 0 for none. @ctx is the data we are operating on, @insn is the
  * array of filter instructions.
  */
-unsigned int __sk_run_filter(void *ctx, const struct sock_filter_int *insn)
+static unsigned int __sk_run_filter(void *ctx, const struct sock_filter_int *insn)
 {
        u64 stack[MAX_BPF_STACK / sizeof(u64)];
        u64 regs[MAX_BPF_REG], tmp;
-       void *ptr;
-       int off;
-
-#define K  insn->imm
-#define A  regs[insn->a_reg]
-#define X  regs[insn->x_reg]
-#define R0 regs[0]
-
-#define CONT    ({insn++; goto select_insn; })
-#define CONT_JMP ({insn++; goto select_insn; })
-
        static const void *jumptable[256] = {
                [0 ... 255] = &&default_label,
                /* Now overwrite non-defaults ... */
-#define DL(A, B, C)    [A|B|C] = &&A##_##B##_##C
-               DL(BPF_ALU, BPF_ADD, BPF_X),
-               DL(BPF_ALU, BPF_ADD, BPF_K),
-               DL(BPF_ALU, BPF_SUB, BPF_X),
-               DL(BPF_ALU, BPF_SUB, BPF_K),
-               DL(BPF_ALU, BPF_AND, BPF_X),
-               DL(BPF_ALU, BPF_AND, BPF_K),
-               DL(BPF_ALU, BPF_OR, BPF_X),
-               DL(BPF_ALU, BPF_OR, BPF_K),
-               DL(BPF_ALU, BPF_LSH, BPF_X),
-               DL(BPF_ALU, BPF_LSH, BPF_K),
-               DL(BPF_ALU, BPF_RSH, BPF_X),
-               DL(BPF_ALU, BPF_RSH, BPF_K),
-               DL(BPF_ALU, BPF_XOR, BPF_X),
-               DL(BPF_ALU, BPF_XOR, BPF_K),
-               DL(BPF_ALU, BPF_MUL, BPF_X),
-               DL(BPF_ALU, BPF_MUL, BPF_K),
-               DL(BPF_ALU, BPF_MOV, BPF_X),
-               DL(BPF_ALU, BPF_MOV, BPF_K),
-               DL(BPF_ALU, BPF_DIV, BPF_X),
-               DL(BPF_ALU, BPF_DIV, BPF_K),
-               DL(BPF_ALU, BPF_MOD, BPF_X),
-               DL(BPF_ALU, BPF_MOD, BPF_K),
-               DL(BPF_ALU, BPF_NEG, 0),
-               DL(BPF_ALU, BPF_END, BPF_TO_BE),
-               DL(BPF_ALU, BPF_END, BPF_TO_LE),
-               DL(BPF_ALU64, BPF_ADD, BPF_X),
-               DL(BPF_ALU64, BPF_ADD, BPF_K),
-               DL(BPF_ALU64, BPF_SUB, BPF_X),
-               DL(BPF_ALU64, BPF_SUB, BPF_K),
-               DL(BPF_ALU64, BPF_AND, BPF_X),
-               DL(BPF_ALU64, BPF_AND, BPF_K),
-               DL(BPF_ALU64, BPF_OR, BPF_X),
-               DL(BPF_ALU64, BPF_OR, BPF_K),
-               DL(BPF_ALU64, BPF_LSH, BPF_X),
-               DL(BPF_ALU64, BPF_LSH, BPF_K),
-               DL(BPF_ALU64, BPF_RSH, BPF_X),
-               DL(BPF_ALU64, BPF_RSH, BPF_K),
-               DL(BPF_ALU64, BPF_XOR, BPF_X),
-               DL(BPF_ALU64, BPF_XOR, BPF_K),
-               DL(BPF_ALU64, BPF_MUL, BPF_X),
-               DL(BPF_ALU64, BPF_MUL, BPF_K),
-               DL(BPF_ALU64, BPF_MOV, BPF_X),
-               DL(BPF_ALU64, BPF_MOV, BPF_K),
-               DL(BPF_ALU64, BPF_ARSH, BPF_X),
-               DL(BPF_ALU64, BPF_ARSH, BPF_K),
-               DL(BPF_ALU64, BPF_DIV, BPF_X),
-               DL(BPF_ALU64, BPF_DIV, BPF_K),
-               DL(BPF_ALU64, BPF_MOD, BPF_X),
-               DL(BPF_ALU64, BPF_MOD, BPF_K),
-               DL(BPF_ALU64, BPF_NEG, 0),
-               DL(BPF_JMP, BPF_CALL, 0),
-               DL(BPF_JMP, BPF_JA, 0),
-               DL(BPF_JMP, BPF_JEQ, BPF_X),
-               DL(BPF_JMP, BPF_JEQ, BPF_K),
-               DL(BPF_JMP, BPF_JNE, BPF_X),
-               DL(BPF_JMP, BPF_JNE, BPF_K),
-               DL(BPF_JMP, BPF_JGT, BPF_X),
-               DL(BPF_JMP, BPF_JGT, BPF_K),
-               DL(BPF_JMP, BPF_JGE, BPF_X),
-               DL(BPF_JMP, BPF_JGE, BPF_K),
-               DL(BPF_JMP, BPF_JSGT, BPF_X),
-               DL(BPF_JMP, BPF_JSGT, BPF_K),
-               DL(BPF_JMP, BPF_JSGE, BPF_X),
-               DL(BPF_JMP, BPF_JSGE, BPF_K),
-               DL(BPF_JMP, BPF_JSET, BPF_X),
-               DL(BPF_JMP, BPF_JSET, BPF_K),
-               DL(BPF_JMP, BPF_EXIT, 0),
-               DL(BPF_STX, BPF_MEM, BPF_B),
-               DL(BPF_STX, BPF_MEM, BPF_H),
-               DL(BPF_STX, BPF_MEM, BPF_W),
-               DL(BPF_STX, BPF_MEM, BPF_DW),
-               DL(BPF_STX, BPF_XADD, BPF_W),
-               DL(BPF_STX, BPF_XADD, BPF_DW),
-               DL(BPF_ST, BPF_MEM, BPF_B),
-               DL(BPF_ST, BPF_MEM, BPF_H),
-               DL(BPF_ST, BPF_MEM, BPF_W),
-               DL(BPF_ST, BPF_MEM, BPF_DW),
-               DL(BPF_LDX, BPF_MEM, BPF_B),
-               DL(BPF_LDX, BPF_MEM, BPF_H),
-               DL(BPF_LDX, BPF_MEM, BPF_W),
-               DL(BPF_LDX, BPF_MEM, BPF_DW),
-               DL(BPF_LD, BPF_ABS, BPF_W),
-               DL(BPF_LD, BPF_ABS, BPF_H),
-               DL(BPF_LD, BPF_ABS, BPF_B),
-               DL(BPF_LD, BPF_IND, BPF_W),
-               DL(BPF_LD, BPF_IND, BPF_H),
-               DL(BPF_LD, BPF_IND, BPF_B),
+#define DL(A, B, C)    [BPF_##A|BPF_##B|BPF_##C] = &&A##_##B##_##C
+               DL(ALU, ADD, X),
+               DL(ALU, ADD, K),
+               DL(ALU, SUB, X),
+               DL(ALU, SUB, K),
+               DL(ALU, AND, X),
+               DL(ALU, AND, K),
+               DL(ALU, OR, X),
+               DL(ALU, OR, K),
+               DL(ALU, LSH, X),
+               DL(ALU, LSH, K),
+               DL(ALU, RSH, X),
+               DL(ALU, RSH, K),
+               DL(ALU, XOR, X),
+               DL(ALU, XOR, K),
+               DL(ALU, MUL, X),
+               DL(ALU, MUL, K),
+               DL(ALU, MOV, X),
+               DL(ALU, MOV, K),
+               DL(ALU, DIV, X),
+               DL(ALU, DIV, K),
+               DL(ALU, MOD, X),
+               DL(ALU, MOD, K),
+               DL(ALU, NEG, 0),
+               DL(ALU, END, TO_BE),
+               DL(ALU, END, TO_LE),
+               DL(ALU64, ADD, X),
+               DL(ALU64, ADD, K),
+               DL(ALU64, SUB, X),
+               DL(ALU64, SUB, K),
+               DL(ALU64, AND, X),
+               DL(ALU64, AND, K),
+               DL(ALU64, OR, X),
+               DL(ALU64, OR, K),
+               DL(ALU64, LSH, X),
+               DL(ALU64, LSH, K),
+               DL(ALU64, RSH, X),
+               DL(ALU64, RSH, K),
+               DL(ALU64, XOR, X),
+               DL(ALU64, XOR, K),
+               DL(ALU64, MUL, X),
+               DL(ALU64, MUL, K),
+               DL(ALU64, MOV, X),
+               DL(ALU64, MOV, K),
+               DL(ALU64, ARSH, X),
+               DL(ALU64, ARSH, K),
+               DL(ALU64, DIV, X),
+               DL(ALU64, DIV, K),
+               DL(ALU64, MOD, X),
+               DL(ALU64, MOD, K),
+               DL(ALU64, NEG, 0),
+               DL(JMP, CALL, 0),
+               DL(JMP, JA, 0),
+               DL(JMP, JEQ, X),
+               DL(JMP, JEQ, K),
+               DL(JMP, JNE, X),
+               DL(JMP, JNE, K),
+               DL(JMP, JGT, X),
+               DL(JMP, JGT, K),
+               DL(JMP, JGE, X),
+               DL(JMP, JGE, K),
+               DL(JMP, JSGT, X),
+               DL(JMP, JSGT, K),
+               DL(JMP, JSGE, X),
+               DL(JMP, JSGE, K),
+               DL(JMP, JSET, X),
+               DL(JMP, JSET, K),
+               DL(JMP, EXIT, 0),
+               DL(STX, MEM, B),
+               DL(STX, MEM, H),
+               DL(STX, MEM, W),
+               DL(STX, MEM, DW),
+               DL(STX, XADD, W),
+               DL(STX, XADD, DW),
+               DL(ST, MEM, B),
+               DL(ST, MEM, H),
+               DL(ST, MEM, W),
+               DL(ST, MEM, DW),
+               DL(LDX, MEM, B),
+               DL(LDX, MEM, H),
+               DL(LDX, MEM, W),
+               DL(LDX, MEM, DW),
+               DL(LD, ABS, W),
+               DL(LD, ABS, H),
+               DL(LD, ABS, B),
+               DL(LD, IND, W),
+               DL(LD, IND, H),
+               DL(LD, IND, B),
 #undef DL
        };
+       void *ptr;
+       int off;
+
+#define CONT    ({ insn++; goto select_insn; })
+#define CONT_JMP ({ insn++; goto select_insn; })
+
+       FP = (u64) (unsigned long) &stack[ARRAY_SIZE(stack)];
+       ARG1 = (u64) (unsigned long) ctx;
 
-       regs[FP_REG]  = (u64) (unsigned long) &stack[ARRAY_SIZE(stack)];
-       regs[ARG1_REG] = (u64) (unsigned long) ctx;
+       /* Register for user BPF programs need to be reset first. */
+       regs[BPF_REG_A] = 0;
+       regs[BPF_REG_X] = 0;
 
 select_insn:
        goto *jumptable[insn->code];
 
        /* ALU */
 #define ALU(OPCODE, OP)                        \
-       BPF_ALU64_##OPCODE##_BPF_X:     \
+       ALU64_##OPCODE##_X:             \
                A = A OP X;             \
                CONT;                   \
-       BPF_ALU_##OPCODE##_BPF_X:       \
+       ALU_##OPCODE##_X:               \
                A = (u32) A OP (u32) X; \
                CONT;                   \
-       BPF_ALU64_##OPCODE##_BPF_K:     \
+       ALU64_##OPCODE##_K:             \
                A = A OP K;             \
                CONT;                   \
-       BPF_ALU_##OPCODE##_BPF_K:       \
+       ALU_##OPCODE##_K:               \
                A = (u32) A OP (u32) K; \
                CONT;
 
-       ALU(BPF_ADD,  +)
-       ALU(BPF_SUB,  -)
-       ALU(BPF_AND,  &)
-       ALU(BPF_OR,   |)
-       ALU(BPF_LSH, <<)
-       ALU(BPF_RSH, >>)
-       ALU(BPF_XOR,  ^)
-       ALU(BPF_MUL,  *)
+       ALU(ADD,  +)
+       ALU(SUB,  -)
+       ALU(AND,  &)
+       ALU(OR,   |)
+       ALU(LSH, <<)
+       ALU(RSH, >>)
+       ALU(XOR,  ^)
+       ALU(MUL,  *)
 #undef ALU
-       BPF_ALU_BPF_NEG_0:
+       ALU_NEG_0:
                A = (u32) -A;
                CONT;
-       BPF_ALU64_BPF_NEG_0:
+       ALU64_NEG_0:
                A = -A;
                CONT;
-       BPF_ALU_BPF_MOV_BPF_X:
+       ALU_MOV_X:
                A = (u32) X;
                CONT;
-       BPF_ALU_BPF_MOV_BPF_K:
+       ALU_MOV_K:
                A = (u32) K;
                CONT;
-       BPF_ALU64_BPF_MOV_BPF_X:
+       ALU64_MOV_X:
                A = X;
                CONT;
-       BPF_ALU64_BPF_MOV_BPF_K:
+       ALU64_MOV_K:
                A = K;
                CONT;
-       BPF_ALU64_BPF_ARSH_BPF_X:
+       ALU64_ARSH_X:
                (*(s64 *) &A) >>= X;
                CONT;
-       BPF_ALU64_BPF_ARSH_BPF_K:
+       ALU64_ARSH_K:
                (*(s64 *) &A) >>= K;
                CONT;
-       BPF_ALU64_BPF_MOD_BPF_X:
+       ALU64_MOD_X:
                if (unlikely(X == 0))
                        return 0;
                tmp = A;
                A = do_div(tmp, X);
                CONT;
-       BPF_ALU_BPF_MOD_BPF_X:
+       ALU_MOD_X:
                if (unlikely(X == 0))
                        return 0;
                tmp = (u32) A;
                A = do_div(tmp, (u32) X);
                CONT;
-       BPF_ALU64_BPF_MOD_BPF_K:
+       ALU64_MOD_K:
                tmp = A;
                A = do_div(tmp, K);
                CONT;
-       BPF_ALU_BPF_MOD_BPF_K:
+       ALU_MOD_K:
                tmp = (u32) A;
                A = do_div(tmp, (u32) K);
                CONT;
-       BPF_ALU64_BPF_DIV_BPF_X:
+       ALU64_DIV_X:
                if (unlikely(X == 0))
                        return 0;
                do_div(A, X);
                CONT;
-       BPF_ALU_BPF_DIV_BPF_X:
+       ALU_DIV_X:
                if (unlikely(X == 0))
                        return 0;
                tmp = (u32) A;
                do_div(tmp, (u32) X);
                A = (u32) tmp;
                CONT;
-       BPF_ALU64_BPF_DIV_BPF_K:
+       ALU64_DIV_K:
                do_div(A, K);
                CONT;
-       BPF_ALU_BPF_DIV_BPF_K:
+       ALU_DIV_K:
                tmp = (u32) A;
                do_div(tmp, (u32) K);
                A = (u32) tmp;
                CONT;
-       BPF_ALU_BPF_END_BPF_TO_BE:
+       ALU_END_TO_BE:
                switch (K) {
                case 16:
                        A = (__force u16) cpu_to_be16(A);
@@ -347,7 +367,7 @@ select_insn:
                        break;
                }
                CONT;
-       BPF_ALU_BPF_END_BPF_TO_LE:
+       ALU_END_TO_LE:
                switch (K) {
                case 16:
                        A = (__force u16) cpu_to_le16(A);
@@ -362,142 +382,144 @@ select_insn:
                CONT;
 
        /* CALL */
-       BPF_JMP_BPF_CALL_0:
-               /* Function call scratches R1-R5 registers, preserves R6-R9,
-                * and stores return value into R0.
+       JMP_CALL_0:
+               /* Function call scratches BPF_R1-BPF_R5 registers,
+                * preserves BPF_R6-BPF_R9, and stores return value
+                * into BPF_R0.
                 */
-               R0 = (__bpf_call_base + insn->imm)(regs[1], regs[2], regs[3],
-                                                  regs[4], regs[5]);
+               BPF_R0 = (__bpf_call_base + insn->imm)(BPF_R1, BPF_R2, BPF_R3,
+                                                      BPF_R4, BPF_R5);
                CONT;
 
        /* JMP */
-       BPF_JMP_BPF_JA_0:
+       JMP_JA_0:
                insn += insn->off;
                CONT;
-       BPF_JMP_BPF_JEQ_BPF_X:
+       JMP_JEQ_X:
                if (A == X) {
                        insn += insn->off;
                        CONT_JMP;
                }
                CONT;
-       BPF_JMP_BPF_JEQ_BPF_K:
+       JMP_JEQ_K:
                if (A == K) {
                        insn += insn->off;
                        CONT_JMP;
                }
                CONT;
-       BPF_JMP_BPF_JNE_BPF_X:
+       JMP_JNE_X:
                if (A != X) {
                        insn += insn->off;
                        CONT_JMP;
                }
                CONT;
-       BPF_JMP_BPF_JNE_BPF_K:
+       JMP_JNE_K:
                if (A != K) {
                        insn += insn->off;
                        CONT_JMP;
                }
                CONT;
-       BPF_JMP_BPF_JGT_BPF_X:
+       JMP_JGT_X:
                if (A > X) {
                        insn += insn->off;
                        CONT_JMP;
                }
                CONT;
-       BPF_JMP_BPF_JGT_BPF_K:
+       JMP_JGT_K:
                if (A > K) {
                        insn += insn->off;
                        CONT_JMP;
                }
                CONT;
-       BPF_JMP_BPF_JGE_BPF_X:
+       JMP_JGE_X:
                if (A >= X) {
                        insn += insn->off;
                        CONT_JMP;
                }
                CONT;
-       BPF_JMP_BPF_JGE_BPF_K:
+       JMP_JGE_K:
                if (A >= K) {
                        insn += insn->off;
                        CONT_JMP;
                }
                CONT;
-       BPF_JMP_BPF_JSGT_BPF_X:
-               if (((s64)A) > ((s64)X)) {
+       JMP_JSGT_X:
+               if (((s64) A) > ((s64) X)) {
                        insn += insn->off;
                        CONT_JMP;
                }
                CONT;
-       BPF_JMP_BPF_JSGT_BPF_K:
-               if (((s64)A) > ((s64)K)) {
+       JMP_JSGT_K:
+               if (((s64) A) > ((s64) K)) {
                        insn += insn->off;
                        CONT_JMP;
                }
                CONT;
-       BPF_JMP_BPF_JSGE_BPF_X:
-               if (((s64)A) >= ((s64)X)) {
+       JMP_JSGE_X:
+               if (((s64) A) >= ((s64) X)) {
                        insn += insn->off;
                        CONT_JMP;
                }
                CONT;
-       BPF_JMP_BPF_JSGE_BPF_K:
-               if (((s64)A) >= ((s64)K)) {
+       JMP_JSGE_K:
+               if (((s64) A) >= ((s64) K)) {
                        insn += insn->off;
                        CONT_JMP;
                }
                CONT;
-       BPF_JMP_BPF_JSET_BPF_X:
+       JMP_JSET_X:
                if (A & X) {
                        insn += insn->off;
                        CONT_JMP;
                }
                CONT;
-       BPF_JMP_BPF_JSET_BPF_K:
+       JMP_JSET_K:
                if (A & K) {
                        insn += insn->off;
                        CONT_JMP;
                }
                CONT;
-       BPF_JMP_BPF_EXIT_0:
-               return R0;
+       JMP_EXIT_0:
+               return BPF_R0;
 
        /* STX and ST and LDX*/
 #define LDST(SIZEOP, SIZE)                                     \
-       BPF_STX_BPF_MEM_##SIZEOP:                               \
+       STX_MEM_##SIZEOP:                                       \
                *(SIZE *)(unsigned long) (A + insn->off) = X;   \
                CONT;                                           \
-       BPF_ST_BPF_MEM_##SIZEOP:                                \
+       ST_MEM_##SIZEOP:                                        \
                *(SIZE *)(unsigned long) (A + insn->off) = K;   \
                CONT;                                           \
-       BPF_LDX_BPF_MEM_##SIZEOP:                               \
+       LDX_MEM_##SIZEOP:                                       \
                A = *(SIZE *)(unsigned long) (X + insn->off);   \
                CONT;
 
-       LDST(BPF_B,   u8)
-       LDST(BPF_H,  u16)
-       LDST(BPF_W,  u32)
-       LDST(BPF_DW, u64)
+       LDST(B,   u8)
+       LDST(H,  u16)
+       LDST(W,  u32)
+       LDST(DW, u64)
 #undef LDST
-       BPF_STX_BPF_XADD_BPF_W: /* lock xadd *(u32 *)(A + insn->off) += X */
+       STX_XADD_W: /* lock xadd *(u32 *)(A + insn->off) += X */
                atomic_add((u32) X, (atomic_t *)(unsigned long)
                           (A + insn->off));
                CONT;
-       BPF_STX_BPF_XADD_BPF_DW: /* lock xadd *(u64 *)(A + insn->off) += X */
+       STX_XADD_DW: /* lock xadd *(u64 *)(A + insn->off) += X */
                atomic64_add((u64) X, (atomic64_t *)(unsigned long)
                             (A + insn->off));
                CONT;
-       BPF_LD_BPF_ABS_BPF_W: /* R0 = ntohl(*(u32 *) (skb->data + K)) */
+       LD_ABS_W: /* BPF_R0 = ntohl(*(u32 *) (skb->data + K)) */
                off = K;
 load_word:
-               /* BPF_LD + BPD_ABS and BPF_LD + BPF_IND insns are only
-                * appearing in the programs where ctx == skb. All programs
-                * keep 'ctx' in regs[CTX_REG] == R6, sk_convert_filter()
-                * saves it in R6, internal BPF verifier will check that
-                * R6 == ctx.
+               /* BPF_LD + BPF_ABS and BPF_LD + BPF_IND insns are
+                * only appearing in the programs where ctx ==
+                * skb. All programs keep 'ctx' in regs[BPF_REG_CTX]
+                * == BPF_R6, sk_convert_filter() saves it in BPF_R6,
+                * internal BPF verifier will check that BPF_R6 ==
+                * ctx.
                 *
-                * BPF_ABS and BPF_IND are wrappers of function calls, so
-                * they scratch R1-R5 registers, preserve R6-R9, and store
-                * return value into R0.
+                * BPF_ABS and BPF_IND are wrappers of function calls,
+                * so they scratch BPF_R1-BPF_R5 registers, preserve
+                * BPF_R6-BPF_R9, and store return value into BPF_R0.
                 *
                 * Implicit input:
                 *   ctx
@@ -507,39 +529,39 @@ load_word:
                 *   K == 32-bit immediate
                 *
                 * Output:
-                *   R0 - 8/16/32-bit skb data converted to cpu endianness
+                *   BPF_R0 - 8/16/32-bit skb data converted to cpu endianness
                 */
                ptr = load_pointer((struct sk_buff *) ctx, off, 4, &tmp);
                if (likely(ptr != NULL)) {
-                       R0 = get_unaligned_be32(ptr);
+                       BPF_R0 = get_unaligned_be32(ptr);
                        CONT;
                }
                return 0;
-       BPF_LD_BPF_ABS_BPF_H: /* R0 = ntohs(*(u16 *) (skb->data + K)) */
+       LD_ABS_H: /* BPF_R0 = ntohs(*(u16 *) (skb->data + K)) */
                off = K;
 load_half:
                ptr = load_pointer((struct sk_buff *) ctx, off, 2, &tmp);
                if (likely(ptr != NULL)) {
-                       R0 = get_unaligned_be16(ptr);
+                       BPF_R0 = get_unaligned_be16(ptr);
                        CONT;
                }
                return 0;
-       BPF_LD_BPF_ABS_BPF_B: /* R0 = *(u8 *) (ctx + K) */
+       LD_ABS_B: /* BPF_R0 = *(u8 *) (ctx + K) */
                off = K;
 load_byte:
                ptr = load_pointer((struct sk_buff *) ctx, off, 1, &tmp);
                if (likely(ptr != NULL)) {
-                       R0 = *(u8 *)ptr;
+                       BPF_R0 = *(u8 *)ptr;
                        CONT;
                }
                return 0;
-       BPF_LD_BPF_IND_BPF_W: /* R0 = ntohl(*(u32 *) (skb->data + X + K)) */
+       LD_IND_W: /* BPF_R0 = ntohl(*(u32 *) (skb->data + X + K)) */
                off = K + X;
                goto load_word;
-       BPF_LD_BPF_IND_BPF_H: /* R0 = ntohs(*(u16 *) (skb->data + X + K)) */
+       LD_IND_H: /* BPF_R0 = ntohs(*(u16 *) (skb->data + X + K)) */
                off = K + X;
                goto load_half;
-       BPF_LD_BPF_IND_BPF_B: /* R0 = *(u8 *) (skb->data + X + K) */
+       LD_IND_B: /* BPF_R0 = *(u8 *) (skb->data + X + K) */
                off = K + X;
                goto load_byte;
 
@@ -547,24 +569,8 @@ load_byte:
                /* If we ever reach this, we have a bug somewhere. */
                WARN_RATELIMIT(1, "unknown opcode %02x\n", insn->code);
                return 0;
-#undef CONT_JMP
-#undef CONT
-
-#undef R0
-#undef X
-#undef A
-#undef K
 }
 
-u32 sk_run_filter_int_seccomp(const struct seccomp_data *ctx,
-                             const struct sock_filter_int *insni)
-    __attribute__ ((alias ("__sk_run_filter")));
-
-u32 sk_run_filter_int_skb(const struct sk_buff *ctx,
-                         const struct sock_filter_int *insni)
-    __attribute__ ((alias ("__sk_run_filter")));
-EXPORT_SYMBOL_GPL(sk_run_filter_int_skb);
-
 /* Helper to find the offset of pkt_type in sk_buff structure. We want
  * to make sure its still a 3bit field starting at a byte boundary;
  * taken from arch/x86/net/bpf_jit_comp.c.
@@ -585,16 +591,14 @@ static unsigned int pkt_type_offset(void)
        return -1;
 }
 
-static u64 __skb_get_pay_offset(u64 ctx, u64 A, u64 X, u64 r4, u64 r5)
+static u64 __skb_get_pay_offset(u64 ctx, u64 a, u64 x, u64 r4, u64 r5)
 {
-       struct sk_buff *skb = (struct sk_buff *)(long) ctx;
-
-       return __skb_get_poff(skb);
+       return __skb_get_poff((struct sk_buff *)(unsigned long) ctx);
 }
 
-static u64 __skb_get_nlattr(u64 ctx, u64 A, u64 X, u64 r4, u64 r5)
+static u64 __skb_get_nlattr(u64 ctx, u64 a, u64 x, u64 r4, u64 r5)
 {
-       struct sk_buff *skb = (struct sk_buff *)(long) ctx;
+       struct sk_buff *skb = (struct sk_buff *)(unsigned long) ctx;
        struct nlattr *nla;
 
        if (skb_is_nonlinear(skb))
@@ -603,19 +607,19 @@ static u64 __skb_get_nlattr(u64 ctx, u64 A, u64 X, u64 r4, u64 r5)
        if (skb->len < sizeof(struct nlattr))
                return 0;
 
-       if (A > skb->len - sizeof(struct nlattr))
+       if (a > skb->len - sizeof(struct nlattr))
                return 0;
 
-       nla = nla_find((struct nlattr *) &skb->data[A], skb->len - A, X);
+       nla = nla_find((struct nlattr *) &skb->data[a], skb->len - a, x);
        if (nla)
                return (void *) nla - (void *) skb->data;
 
        return 0;
 }
 
-static u64 __skb_get_nlattr_nest(u64 ctx, u64 A, u64 X, u64 r4, u64 r5)
+static u64 __skb_get_nlattr_nest(u64 ctx, u64 a, u64 x, u64 r4, u64 r5)
 {
-       struct sk_buff *skb = (struct sk_buff *)(long) ctx;
+       struct sk_buff *skb = (struct sk_buff *)(unsigned long) ctx;
        struct nlattr *nla;
 
        if (skb_is_nonlinear(skb))
@@ -624,31 +628,30 @@ static u64 __skb_get_nlattr_nest(u64 ctx, u64 A, u64 X, u64 r4, u64 r5)
        if (skb->len < sizeof(struct nlattr))
                return 0;
 
-       if (A > skb->len - sizeof(struct nlattr))
+       if (a > skb->len - sizeof(struct nlattr))
                return 0;
 
-       nla = (struct nlattr *) &skb->data[A];
-       if (nla->nla_len > skb->len - A)
+       nla = (struct nlattr *) &skb->data[a];
+       if (nla->nla_len > skb->len - a)
                return 0;
 
-       nla = nla_find_nested(nla, X);
+       nla = nla_find_nested(nla, x);
        if (nla)
                return (void *) nla - (void *) skb->data;
 
        return 0;
 }
 
-static u64 __get_raw_cpu_id(u64 ctx, u64 A, u64 X, u64 r4, u64 r5)
+static u64 __get_raw_cpu_id(u64 ctx, u64 a, u64 x, u64 r4, u64 r5)
 {
        return raw_smp_processor_id();
 }
 
-/* Register mappings for user programs. */
-#define A_REG          0
-#define X_REG          7
-#define TMP_REG                8
-#define ARG2_REG       2
-#define ARG3_REG       3
+/* note that this only generates 32-bit random numbers */
+static u64 __get_random_u32(u64 ctx, u64 a, u64 x, u64 r4, u64 r5)
+{
+       return prandom_u32();
+}
 
 static bool convert_bpf_extensions(struct sock_filter *fp,
                                   struct sock_filter_int **insnp)
@@ -659,57 +662,46 @@ static bool convert_bpf_extensions(struct sock_filter *fp,
        case SKF_AD_OFF + SKF_AD_PROTOCOL:
                BUILD_BUG_ON(FIELD_SIZEOF(struct sk_buff, protocol) != 2);
 
-               insn->code = BPF_LDX | BPF_MEM | BPF_H;
-               insn->a_reg = A_REG;
-               insn->x_reg = CTX_REG;
-               insn->off = offsetof(struct sk_buff, protocol);
+               /* A = *(u16 *) (ctx + offsetof(protocol)) */
+               *insn = BPF_LDX_MEM(BPF_H, BPF_REG_A, BPF_REG_CTX,
+                                   offsetof(struct sk_buff, protocol));
                insn++;
 
                /* A = ntohs(A) [emitting a nop or swap16] */
                insn->code = BPF_ALU | BPF_END | BPF_FROM_BE;
-               insn->a_reg = A_REG;
+               insn->a_reg = BPF_REG_A;
                insn->imm = 16;
                break;
 
        case SKF_AD_OFF + SKF_AD_PKTTYPE:
-               insn->code = BPF_LDX | BPF_MEM | BPF_B;
-               insn->a_reg = A_REG;
-               insn->x_reg = CTX_REG;
-               insn->off = pkt_type_offset();
+               *insn = BPF_LDX_MEM(BPF_B, BPF_REG_A, BPF_REG_CTX,
+                                   pkt_type_offset());
                if (insn->off < 0)
                        return false;
                insn++;
 
-               insn->code = BPF_ALU | BPF_AND | BPF_K;
-               insn->a_reg = A_REG;
-               insn->imm = PKT_TYPE_MAX;
+               *insn = BPF_ALU32_IMM(BPF_AND, BPF_REG_A, PKT_TYPE_MAX);
                break;
 
        case SKF_AD_OFF + SKF_AD_IFINDEX:
        case SKF_AD_OFF + SKF_AD_HATYPE:
-               if (FIELD_SIZEOF(struct sk_buff, dev) == 8)
-                       insn->code = BPF_LDX | BPF_MEM | BPF_DW;
-               else
-                       insn->code = BPF_LDX | BPF_MEM | BPF_W;
-               insn->a_reg = TMP_REG;
-               insn->x_reg = CTX_REG;
-               insn->off = offsetof(struct sk_buff, dev);
+               *insn = BPF_LDX_MEM(size_to_bpf(FIELD_SIZEOF(struct sk_buff, dev)),
+                                   BPF_REG_TMP, BPF_REG_CTX,
+                                   offsetof(struct sk_buff, dev));
                insn++;
 
-               insn->code = BPF_JMP | BPF_JNE | BPF_K;
-               insn->a_reg = TMP_REG;
-               insn->imm = 0;
-               insn->off = 1;
+               /* if (tmp != 0) goto pc+1 */
+               *insn = BPF_JMP_IMM(BPF_JNE, BPF_REG_TMP, 0, 1);
                insn++;
 
-               insn->code = BPF_JMP | BPF_EXIT;
+               *insn = BPF_EXIT_INSN();
                insn++;
 
                BUILD_BUG_ON(FIELD_SIZEOF(struct net_device, ifindex) != 4);
                BUILD_BUG_ON(FIELD_SIZEOF(struct net_device, type) != 2);
 
-               insn->a_reg = A_REG;
-               insn->x_reg = TMP_REG;
+               insn->a_reg = BPF_REG_A;
+               insn->x_reg = BPF_REG_TMP;
 
                if (fp->k == SKF_AD_OFF + SKF_AD_IFINDEX) {
                        insn->code = BPF_LDX | BPF_MEM | BPF_W;
@@ -723,55 +715,45 @@ static bool convert_bpf_extensions(struct sock_filter *fp,
        case SKF_AD_OFF + SKF_AD_MARK:
                BUILD_BUG_ON(FIELD_SIZEOF(struct sk_buff, mark) != 4);
 
-               insn->code = BPF_LDX | BPF_MEM | BPF_W;
-               insn->a_reg = A_REG;
-               insn->x_reg = CTX_REG;
-               insn->off = offsetof(struct sk_buff, mark);
+               *insn = BPF_LDX_MEM(BPF_W, BPF_REG_A, BPF_REG_CTX,
+                                   offsetof(struct sk_buff, mark));
                break;
 
        case SKF_AD_OFF + SKF_AD_RXHASH:
                BUILD_BUG_ON(FIELD_SIZEOF(struct sk_buff, hash) != 4);
 
-               insn->code = BPF_LDX | BPF_MEM | BPF_W;
-               insn->a_reg = A_REG;
-               insn->x_reg = CTX_REG;
-               insn->off = offsetof(struct sk_buff, hash);
+               *insn = BPF_LDX_MEM(BPF_W, BPF_REG_A, BPF_REG_CTX,
+                                   offsetof(struct sk_buff, hash));
                break;
 
        case SKF_AD_OFF + SKF_AD_QUEUE:
                BUILD_BUG_ON(FIELD_SIZEOF(struct sk_buff, queue_mapping) != 2);
 
-               insn->code = BPF_LDX | BPF_MEM | BPF_H;
-               insn->a_reg = A_REG;
-               insn->x_reg = CTX_REG;
-               insn->off = offsetof(struct sk_buff, queue_mapping);
+               *insn = BPF_LDX_MEM(BPF_H, BPF_REG_A, BPF_REG_CTX,
+                                   offsetof(struct sk_buff, queue_mapping));
                break;
 
        case SKF_AD_OFF + SKF_AD_VLAN_TAG:
        case SKF_AD_OFF + SKF_AD_VLAN_TAG_PRESENT:
                BUILD_BUG_ON(FIELD_SIZEOF(struct sk_buff, vlan_tci) != 2);
 
-               insn->code = BPF_LDX | BPF_MEM | BPF_H;
-               insn->a_reg = A_REG;
-               insn->x_reg = CTX_REG;
-               insn->off = offsetof(struct sk_buff, vlan_tci);
+               /* A = *(u16 *) (ctx + offsetof(vlan_tci)) */
+               *insn = BPF_LDX_MEM(BPF_H, BPF_REG_A, BPF_REG_CTX,
+                                   offsetof(struct sk_buff, vlan_tci));
                insn++;
 
                BUILD_BUG_ON(VLAN_TAG_PRESENT != 0x1000);
 
                if (fp->k == SKF_AD_OFF + SKF_AD_VLAN_TAG) {
-                       insn->code = BPF_ALU | BPF_AND | BPF_K;
-                       insn->a_reg = A_REG;
-                       insn->imm = ~VLAN_TAG_PRESENT;
+                       *insn = BPF_ALU32_IMM(BPF_AND, BPF_REG_A,
+                                             ~VLAN_TAG_PRESENT);
                } else {
-                       insn->code = BPF_ALU | BPF_RSH | BPF_K;
-                       insn->a_reg = A_REG;
-                       insn->imm = 12;
+                       /* A >>= 12 */
+                       *insn = BPF_ALU32_IMM(BPF_RSH, BPF_REG_A, 12);
                        insn++;
 
-                       insn->code = BPF_ALU | BPF_AND | BPF_K;
-                       insn->a_reg = A_REG;
-                       insn->imm = 1;
+                       /* A &= 1 */
+                       *insn = BPF_ALU32_IMM(BPF_AND, BPF_REG_A, 1);
                }
                break;
 
@@ -779,22 +761,17 @@ static bool convert_bpf_extensions(struct sock_filter *fp,
        case SKF_AD_OFF + SKF_AD_NLATTR:
        case SKF_AD_OFF + SKF_AD_NLATTR_NEST:
        case SKF_AD_OFF + SKF_AD_CPU:
+       case SKF_AD_OFF + SKF_AD_RANDOM:
                /* arg1 = ctx */
-               insn->code = BPF_ALU64 | BPF_MOV | BPF_X;
-               insn->a_reg = ARG1_REG;
-               insn->x_reg = CTX_REG;
+               *insn = BPF_ALU64_REG(BPF_MOV, BPF_REG_ARG1, BPF_REG_CTX);
                insn++;
 
                /* arg2 = A */
-               insn->code = BPF_ALU64 | BPF_MOV | BPF_X;
-               insn->a_reg = ARG2_REG;
-               insn->x_reg = A_REG;
+               *insn = BPF_ALU64_REG(BPF_MOV, BPF_REG_ARG2, BPF_REG_A);
                insn++;
 
                /* arg3 = X */
-               insn->code = BPF_ALU64 | BPF_MOV | BPF_X;
-               insn->a_reg = ARG3_REG;
-               insn->x_reg = X_REG;
+               *insn = BPF_ALU64_REG(BPF_MOV, BPF_REG_ARG3, BPF_REG_X);
                insn++;
 
                /* Emit call(ctx, arg2=A, arg3=X) */
@@ -812,13 +789,15 @@ static bool convert_bpf_extensions(struct sock_filter *fp,
                case SKF_AD_OFF + SKF_AD_CPU:
                        insn->imm = __get_raw_cpu_id - __bpf_call_base;
                        break;
+               case SKF_AD_OFF + SKF_AD_RANDOM:
+                       insn->imm = __get_random_u32 - __bpf_call_base;
+                       break;
                }
                break;
 
        case SKF_AD_OFF + SKF_AD_ALU_XOR_X:
-               insn->code = BPF_ALU | BPF_XOR | BPF_X;
-               insn->a_reg = A_REG;
-               insn->x_reg = X_REG;
+               /* A ^= X */
+               *insn = BPF_ALU32_REG(BPF_XOR, BPF_REG_A, BPF_REG_X);
                break;
 
        default:
@@ -868,7 +847,7 @@ int sk_convert_filter(struct sock_filter *prog, int len,
        u8 bpf_src;
 
        BUILD_BUG_ON(BPF_MEMWORDS * sizeof(u32) > MAX_BPF_STACK);
-       BUILD_BUG_ON(FP_REG + 1 != MAX_BPF_REG);
+       BUILD_BUG_ON(BPF_REG_FP + 1 != MAX_BPF_REG);
 
        if (len <= 0 || len >= BPF_MAXINSNS)
                return -EINVAL;
@@ -884,9 +863,7 @@ do_pass:
        fp = prog;
 
        if (new_insn) {
-               new_insn->code = BPF_ALU64 | BPF_MOV | BPF_X;
-               new_insn->a_reg = CTX_REG;
-               new_insn->x_reg = ARG1_REG;
+               *new_insn = BPF_ALU64_REG(BPF_MOV, BPF_REG_CTX, BPF_REG_ARG1);
        }
        new_insn++;
 
@@ -936,8 +913,8 @@ do_pass:
                                break;
 
                        insn->code = fp->code;
-                       insn->a_reg = A_REG;
-                       insn->x_reg = X_REG;
+                       insn->a_reg = BPF_REG_A;
+                       insn->x_reg = BPF_REG_X;
                        insn->imm = fp->k;
                        break;
 
@@ -971,16 +948,16 @@ do_pass:
                                 * in compare insn.
                                 */
                                insn->code = BPF_ALU | BPF_MOV | BPF_K;
-                               insn->a_reg = TMP_REG;
+                               insn->a_reg = BPF_REG_TMP;
                                insn->imm = fp->k;
                                insn++;
 
-                               insn->a_reg = A_REG;
-                               insn->x_reg = TMP_REG;
+                               insn->a_reg = BPF_REG_A;
+                               insn->x_reg = BPF_REG_TMP;
                                bpf_src = BPF_X;
                        } else {
-                               insn->a_reg = A_REG;
-                               insn->x_reg = X_REG;
+                               insn->a_reg = BPF_REG_A;
+                               insn->x_reg = BPF_REG_X;
                                insn->imm = fp->k;
                                bpf_src = BPF_SRC(fp->code);
                        }
@@ -1014,34 +991,28 @@ do_pass:
 
                /* ldxb 4 * ([14] & 0xf) is remaped into 6 insns. */
                case BPF_LDX | BPF_MSH | BPF_B:
-                       insn->code = BPF_ALU64 | BPF_MOV | BPF_X;
-                       insn->a_reg = TMP_REG;
-                       insn->x_reg = A_REG;
+                       /* tmp = A */
+                       *insn = BPF_ALU64_REG(BPF_MOV, BPF_REG_TMP, BPF_REG_A);
                        insn++;
 
-                       insn->code = BPF_LD | BPF_ABS | BPF_B;
-                       insn->a_reg = A_REG;
-                       insn->imm = fp->k;
+                       /* A = BPF_R0 = *(u8 *) (skb->data + K) */
+                       *insn = BPF_LD_ABS(BPF_B, fp->k);
                        insn++;
 
-                       insn->code = BPF_ALU | BPF_AND | BPF_K;
-                       insn->a_reg = A_REG;
-                       insn->imm = 0xf;
+                       /* A &= 0xf */
+                       *insn = BPF_ALU32_IMM(BPF_AND, BPF_REG_A, 0xf);
                        insn++;
 
-                       insn->code = BPF_ALU | BPF_LSH | BPF_K;
-                       insn->a_reg = A_REG;
-                       insn->imm = 2;
+                       /* A <<= 2 */
+                       *insn = BPF_ALU32_IMM(BPF_LSH, BPF_REG_A, 2);
                        insn++;
 
-                       insn->code = BPF_ALU64 | BPF_MOV | BPF_X;
-                       insn->a_reg = X_REG;
-                       insn->x_reg = A_REG;
+                       /* X = A */
+                       *insn = BPF_ALU64_REG(BPF_MOV, BPF_REG_X, BPF_REG_A);
                        insn++;
 
-                       insn->code = BPF_ALU64 | BPF_MOV | BPF_X;
-                       insn->a_reg = A_REG;
-                       insn->x_reg = TMP_REG;
+                       /* A = tmp */
+                       *insn = BPF_ALU64_REG(BPF_MOV, BPF_REG_A, BPF_REG_TMP);
                        break;
 
                /* RET_K, RET_A are remaped into 2 insns. */
@@ -1051,19 +1022,20 @@ do_pass:
                                     (BPF_RVAL(fp->code) == BPF_K ?
                                      BPF_K : BPF_X);
                        insn->a_reg = 0;
-                       insn->x_reg = A_REG;
+                       insn->x_reg = BPF_REG_A;
                        insn->imm = fp->k;
                        insn++;
 
-                       insn->code = BPF_JMP | BPF_EXIT;
+                       *insn = BPF_EXIT_INSN();
                        break;
 
                /* Store to stack. */
                case BPF_ST:
                case BPF_STX:
                        insn->code = BPF_STX | BPF_MEM | BPF_W;
-                       insn->a_reg = FP_REG;
-                       insn->x_reg = fp->code == BPF_ST ? A_REG : X_REG;
+                       insn->a_reg = BPF_REG_FP;
+                       insn->x_reg = fp->code == BPF_ST ?
+                                     BPF_REG_A : BPF_REG_X;
                        insn->off = -(BPF_MEMWORDS - fp->k) * 4;
                        break;
 
@@ -1072,8 +1044,8 @@ do_pass:
                case BPF_LDX | BPF_MEM:
                        insn->code = BPF_LDX | BPF_MEM | BPF_W;
                        insn->a_reg = BPF_CLASS(fp->code) == BPF_LD ?
-                                     A_REG : X_REG;
-                       insn->x_reg = FP_REG;
+                                     BPF_REG_A : BPF_REG_X;
+                       insn->x_reg = BPF_REG_FP;
                        insn->off = -(BPF_MEMWORDS - fp->k) * 4;
                        break;
 
@@ -1082,22 +1054,18 @@ do_pass:
                case BPF_LDX | BPF_IMM:
                        insn->code = BPF_ALU | BPF_MOV | BPF_K;
                        insn->a_reg = BPF_CLASS(fp->code) == BPF_LD ?
-                                     A_REG : X_REG;
+                                     BPF_REG_A : BPF_REG_X;
                        insn->imm = fp->k;
                        break;
 
                /* X = A */
                case BPF_MISC | BPF_TAX:
-                       insn->code = BPF_ALU64 | BPF_MOV | BPF_X;
-                       insn->a_reg = X_REG;
-                       insn->x_reg = A_REG;
+                       *insn = BPF_ALU64_REG(BPF_MOV, BPF_REG_X, BPF_REG_A);
                        break;
 
                /* A = X */
                case BPF_MISC | BPF_TXA:
-                       insn->code = BPF_ALU64 | BPF_MOV | BPF_X;
-                       insn->a_reg = A_REG;
-                       insn->x_reg = X_REG;
+                       *insn = BPF_ALU64_REG(BPF_MOV, BPF_REG_A, BPF_REG_X);
                        break;
 
                /* A = skb->len or X = skb->len */
@@ -1105,17 +1073,15 @@ do_pass:
                case BPF_LDX | BPF_W | BPF_LEN:
                        insn->code = BPF_LDX | BPF_MEM | BPF_W;
                        insn->a_reg = BPF_CLASS(fp->code) == BPF_LD ?
-                                     A_REG : X_REG;
-                       insn->x_reg = CTX_REG;
+                                     BPF_REG_A : BPF_REG_X;
+                       insn->x_reg = BPF_REG_CTX;
                        insn->off = offsetof(struct sk_buff, len);
                        break;
 
                /* access seccomp_data fields */
                case BPF_LDX | BPF_ABS | BPF_W:
-                       insn->code = BPF_LDX | BPF_MEM | BPF_W;
-                       insn->a_reg = A_REG;
-                       insn->x_reg = CTX_REG;
-                       insn->off = fp->k;
+                       /* A = *(u32 *) (ctx + K) */
+                       *insn = BPF_LDX_MEM(BPF_W, BPF_REG_A, BPF_REG_CTX, fp->k);
                        break;
 
                default:
@@ -1362,6 +1328,7 @@ int sk_chk_filter(struct sock_filter *filter, unsigned int flen)
                        ANCILLARY(VLAN_TAG);
                        ANCILLARY(VLAN_TAG_PRESENT);
                        ANCILLARY(PAY_OFFSET);
+                       ANCILLARY(RANDOM);
                        }
 
                        /* ancillary operation unknown or unsupported */
@@ -1421,7 +1388,7 @@ static void sk_filter_release_rcu(struct rcu_head *rcu)
        struct sk_filter *fp = container_of(rcu, struct sk_filter, rcu);
 
        sk_release_orig_filter(fp);
-       bpf_jit_free(fp);
+       sk_filter_free(fp);
 }
 
 /**
@@ -1459,7 +1426,7 @@ static struct sk_filter *__sk_migrate_realloc(struct sk_filter *fp,
 
        fp_new = sock_kmalloc(sk, len, GFP_KERNEL);
        if (fp_new) {
-               memcpy(fp_new, fp, sizeof(struct sk_filter));
+               *fp_new = *fp;
                /* As we're kepping orig_prog in fp_new along,
                 * we need to make sure we're not evicting it
                 * from the old fp.
@@ -1521,7 +1488,6 @@ static struct sk_filter *__sk_migrate_filter(struct sk_filter *fp,
                goto out_err_free;
        }
 
-       fp->bpf_func = sk_run_filter_int_skb;
        fp->len = new_len;
 
        /* 2nd pass: remap sock_filter insns into sock_filter_int insns. */
@@ -1534,6 +1500,8 @@ static struct sk_filter *__sk_migrate_filter(struct sk_filter *fp,
                 */
                goto out_err_free;
 
+       sk_filter_select_runtime(fp);
+
        kfree(old_prog);
        return fp;
 
@@ -1548,6 +1516,33 @@ out_err:
        return ERR_PTR(err);
 }
 
+void __weak bpf_int_jit_compile(struct sk_filter *prog)
+{
+}
+
+/**
+ *     sk_filter_select_runtime - select execution runtime for BPF program
+ *     @fp: sk_filter populated with internal BPF program
+ *
+ * try to JIT internal BPF program, if JIT is not available select interpreter
+ * BPF program will be executed via SK_RUN_FILTER() macro
+ */
+void sk_filter_select_runtime(struct sk_filter *fp)
+{
+       fp->bpf_func = (void *) __sk_run_filter;
+
+       /* Probe if internal BPF can be JITed */
+       bpf_int_jit_compile(fp);
+}
+EXPORT_SYMBOL_GPL(sk_filter_select_runtime);
+
+/* free internal BPF program */
+void sk_filter_free(struct sk_filter *fp)
+{
+       bpf_jit_free(fp);
+}
+EXPORT_SYMBOL_GPL(sk_filter_free);
+
 static struct sk_filter *__sk_prepare_filter(struct sk_filter *fp,
                                             struct sock *sk)
 {
@@ -1746,6 +1741,7 @@ void sk_decode_filter(struct sock_filter *filt, struct sock_filter *to)
                [BPF_S_ANC_VLAN_TAG]    = BPF_LD|BPF_B|BPF_ABS,
                [BPF_S_ANC_VLAN_TAG_PRESENT] = BPF_LD|BPF_B|BPF_ABS,
                [BPF_S_ANC_PAY_OFFSET]  = BPF_LD|BPF_B|BPF_ABS,
+               [BPF_S_ANC_RANDOM]      = BPF_LD|BPF_B|BPF_ABS,
                [BPF_S_LD_W_LEN]        = BPF_LD|BPF_W|BPF_LEN,
                [BPF_S_LD_W_IND]        = BPF_LD|BPF_W|BPF_IND,
                [BPF_S_LD_H_IND]        = BPF_LD|BPF_H|BPF_IND,
index 81d3a9a084536541867afe9350602c0c73253006..05e949d482049b839c59714381cde1e8a186926a 100644 (file)
@@ -273,7 +273,7 @@ static void cleanup_net(struct work_struct *work)
 {
        const struct pernet_operations *ops;
        struct net *net, *tmp;
-       LIST_HEAD(net_kill_list);
+       struct list_head net_kill_list;
        LIST_HEAD(net_exit_list);
 
        /* Atomically snapshot the list of namespaces to cleanup */
index 0304f981f7ffa5f005b28f08f17ff8264c41bb65..fc17a9d309ac028fc61ac7db6e35a05c77513a24 100644 (file)
@@ -573,7 +573,7 @@ static int pktgen_if_show(struct seq_file *seq, void *v)
                   is_zero_ether_addr(pkt_dev->src_mac) ?
                             pkt_dev->odev->dev_addr : pkt_dev->src_mac);
 
-       seq_printf(seq, "dst_mac: ");
+       seq_puts(seq, "dst_mac: ");
        seq_printf(seq, "%pM\n", pkt_dev->dst_mac);
 
        seq_printf(seq,
@@ -588,7 +588,7 @@ static int pktgen_if_show(struct seq_file *seq, void *v)
 
        if (pkt_dev->nr_labels) {
                unsigned int i;
-               seq_printf(seq, "     mpls: ");
+               seq_puts(seq, "     mpls: ");
                for (i = 0; i < pkt_dev->nr_labels; i++)
                        seq_printf(seq, "%08x%s", ntohl(pkt_dev->labels[i]),
                                   i == pkt_dev->nr_labels-1 ? "\n" : ", ");
@@ -613,67 +613,67 @@ static int pktgen_if_show(struct seq_file *seq, void *v)
        if (pkt_dev->node >= 0)
                seq_printf(seq, "     node: %d\n", pkt_dev->node);
 
-       seq_printf(seq, "     Flags: ");
+       seq_puts(seq, "     Flags: ");
 
        if (pkt_dev->flags & F_IPV6)
-               seq_printf(seq, "IPV6  ");
+               seq_puts(seq, "IPV6  ");
 
        if (pkt_dev->flags & F_IPSRC_RND)
-               seq_printf(seq, "IPSRC_RND  ");
+               seq_puts(seq, "IPSRC_RND  ");
 
        if (pkt_dev->flags & F_IPDST_RND)
-               seq_printf(seq, "IPDST_RND  ");
+               seq_puts(seq, "IPDST_RND  ");
 
        if (pkt_dev->flags & F_TXSIZE_RND)
-               seq_printf(seq, "TXSIZE_RND  ");
+               seq_puts(seq, "TXSIZE_RND  ");
 
        if (pkt_dev->flags & F_UDPSRC_RND)
-               seq_printf(seq, "UDPSRC_RND  ");
+               seq_puts(seq, "UDPSRC_RND  ");
 
        if (pkt_dev->flags & F_UDPDST_RND)
-               seq_printf(seq, "UDPDST_RND  ");
+               seq_puts(seq, "UDPDST_RND  ");
 
        if (pkt_dev->flags & F_UDPCSUM)
-               seq_printf(seq, "UDPCSUM  ");
+               seq_puts(seq, "UDPCSUM  ");
 
        if (pkt_dev->flags & F_MPLS_RND)
-               seq_printf(seq,  "MPLS_RND  ");
+               seq_puts(seq,  "MPLS_RND  ");
 
        if (pkt_dev->flags & F_QUEUE_MAP_RND)
-               seq_printf(seq,  "QUEUE_MAP_RND  ");
+               seq_puts(seq,  "QUEUE_MAP_RND  ");
 
        if (pkt_dev->flags & F_QUEUE_MAP_CPU)
-               seq_printf(seq,  "QUEUE_MAP_CPU  ");
+               seq_puts(seq,  "QUEUE_MAP_CPU  ");
 
        if (pkt_dev->cflows) {
                if (pkt_dev->flags & F_FLOW_SEQ)
-                       seq_printf(seq,  "FLOW_SEQ  "); /*in sequence flows*/
+                       seq_puts(seq,  "FLOW_SEQ  "); /*in sequence flows*/
                else
-                       seq_printf(seq,  "FLOW_RND  ");
+                       seq_puts(seq,  "FLOW_RND  ");
        }
 
 #ifdef CONFIG_XFRM
        if (pkt_dev->flags & F_IPSEC_ON) {
-               seq_printf(seq,  "IPSEC  ");
+               seq_puts(seq,  "IPSEC  ");
                if (pkt_dev->spi)
                        seq_printf(seq, "spi:%u", pkt_dev->spi);
        }
 #endif
 
        if (pkt_dev->flags & F_MACSRC_RND)
-               seq_printf(seq, "MACSRC_RND  ");
+               seq_puts(seq, "MACSRC_RND  ");
 
        if (pkt_dev->flags & F_MACDST_RND)
-               seq_printf(seq, "MACDST_RND  ");
+               seq_puts(seq, "MACDST_RND  ");
 
        if (pkt_dev->flags & F_VID_RND)
-               seq_printf(seq, "VID_RND  ");
+               seq_puts(seq, "VID_RND  ");
 
        if (pkt_dev->flags & F_SVID_RND)
-               seq_printf(seq, "SVID_RND  ");
+               seq_puts(seq, "SVID_RND  ");
 
        if (pkt_dev->flags & F_NODE)
-               seq_printf(seq, "NODE_ALLOC  ");
+               seq_puts(seq, "NODE_ALLOC  ");
 
        seq_puts(seq, "\n");
 
@@ -716,7 +716,7 @@ static int pktgen_if_show(struct seq_file *seq, void *v)
        if (pkt_dev->result[0])
                seq_printf(seq, "Result: %s\n", pkt_dev->result);
        else
-               seq_printf(seq, "Result: Idle\n");
+               seq_puts(seq, "Result: Idle\n");
 
        return 0;
 }
@@ -1735,14 +1735,14 @@ static int pktgen_thread_show(struct seq_file *seq, void *v)
 
        BUG_ON(!t);
 
-       seq_printf(seq, "Running: ");
+       seq_puts(seq, "Running: ");
 
        if_lock(t);
        list_for_each_entry(pkt_dev, &t->if_list, list)
                if (pkt_dev->running)
                        seq_printf(seq, "%s ", pkt_dev->odevname);
 
-       seq_printf(seq, "\nStopped: ");
+       seq_puts(seq, "\nStopped: ");
 
        list_for_each_entry(pkt_dev, &t->if_list, list)
                if (!pkt_dev->running)
@@ -1751,7 +1751,7 @@ static int pktgen_thread_show(struct seq_file *seq, void *v)
        if (t->result[0])
                seq_printf(seq, "\nResult: %s\n", t->result);
        else
-               seq_printf(seq, "\nResult: NA\n");
+               seq_puts(seq, "\nResult: NA\n");
 
        if_unlock(t);
 
index eaba0f68f8608618870a7009f27352523bee108f..37d86157b76ee3e572653c4455cb26b03fa50a8a 100644 (file)
@@ -88,7 +88,7 @@ EXPORT_SYMBOL_GPL(ptp_classify_raw);
 
 void __init ptp_classifier_init(void)
 {
-       static struct sock_filter ptp_filter[] = {
+       static struct sock_filter ptp_filter[] __initdata = {
                { 0x28,  0,  0, 0x0000000c },
                { 0x15,  0, 12, 0x00000800 },
                { 0x30,  0,  0, 0x00000017 },
index d4ff41739b0f23fcb572905dd34288cb1d8ebd49..9837bebf93cea9e9a2f909947326b83b3a3356f9 100644 (file)
@@ -774,7 +774,8 @@ static inline int rtnl_vfinfo_size(const struct net_device *dev,
                return 0;
 }
 
-static size_t rtnl_port_size(const struct net_device *dev)
+static size_t rtnl_port_size(const struct net_device *dev,
+                            u32 ext_filter_mask)
 {
        size_t port_size = nla_total_size(4)            /* PORT_VF */
                + nla_total_size(PORT_PROFILE_MAX)      /* PORT_PROFILE */
@@ -790,7 +791,8 @@ static size_t rtnl_port_size(const struct net_device *dev)
        size_t port_self_size = nla_total_size(sizeof(struct nlattr))
                + port_size;
 
-       if (!dev->netdev_ops->ndo_get_vf_port || !dev->dev.parent)
+       if (!dev->netdev_ops->ndo_get_vf_port || !dev->dev.parent ||
+           !(ext_filter_mask & RTEXT_FILTER_VF))
                return 0;
        if (dev_num_vf(dev->dev.parent))
                return port_self_size + vf_ports_size +
@@ -826,7 +828,7 @@ static noinline size_t if_nlmsg_size(const struct net_device *dev,
               + nla_total_size(ext_filter_mask
                                & RTEXT_FILTER_VF ? 4 : 0) /* IFLA_NUM_VF */
               + rtnl_vfinfo_size(dev, ext_filter_mask) /* IFLA_VFINFO_LIST */
-              + rtnl_port_size(dev) /* IFLA_VF_PORTS + IFLA_PORT_SELF */
+              + rtnl_port_size(dev, ext_filter_mask) /* IFLA_VF_PORTS + IFLA_PORT_SELF */
               + rtnl_link_get_size(dev) /* IFLA_LINKINFO */
               + rtnl_link_get_af_size(dev) /* IFLA_AF_SPEC */
               + nla_total_size(MAX_PHYS_PORT_ID_LEN); /* IFLA_PHYS_PORT_ID */
@@ -888,11 +890,13 @@ static int rtnl_port_self_fill(struct sk_buff *skb, struct net_device *dev)
        return 0;
 }
 
-static int rtnl_port_fill(struct sk_buff *skb, struct net_device *dev)
+static int rtnl_port_fill(struct sk_buff *skb, struct net_device *dev,
+                         u32 ext_filter_mask)
 {
        int err;
 
-       if (!dev->netdev_ops->ndo_get_vf_port || !dev->dev.parent)
+       if (!dev->netdev_ops->ndo_get_vf_port || !dev->dev.parent ||
+           !(ext_filter_mask & RTEXT_FILTER_VF))
                return 0;
 
        err = rtnl_port_self_fill(skb, dev);
@@ -1079,7 +1083,7 @@ static int rtnl_fill_ifinfo(struct sk_buff *skb, struct net_device *dev,
                nla_nest_end(skb, vfinfo);
        }
 
-       if (rtnl_port_fill(skb, dev))
+       if (rtnl_port_fill(skb, dev, ext_filter_mask))
                goto nla_put_failure;
 
        if (dev->rtnl_link_ops || rtnl_have_link_slave_info(dev)) {
@@ -1198,6 +1202,7 @@ static int rtnl_dump_ifinfo(struct sk_buff *skb, struct netlink_callback *cb)
        struct hlist_head *head;
        struct nlattr *tb[IFLA_MAX+1];
        u32 ext_filter_mask = 0;
+       int err;
 
        s_h = cb->args[0];
        s_idx = cb->args[1];
@@ -1218,11 +1223,17 @@ static int rtnl_dump_ifinfo(struct sk_buff *skb, struct netlink_callback *cb)
                hlist_for_each_entry_rcu(dev, head, index_hlist) {
                        if (idx < s_idx)
                                goto cont;
-                       if (rtnl_fill_ifinfo(skb, dev, RTM_NEWLINK,
-                                            NETLINK_CB(cb->skb).portid,
-                                            cb->nlh->nlmsg_seq, 0,
-                                            NLM_F_MULTI,
-                                            ext_filter_mask) <= 0)
+                       err = rtnl_fill_ifinfo(skb, dev, RTM_NEWLINK,
+                                              NETLINK_CB(cb->skb).portid,
+                                              cb->nlh->nlmsg_seq, 0,
+                                              NLM_F_MULTI,
+                                              ext_filter_mask);
+                       /* If we ran out of room on the first message,
+                        * we're in trouble
+                        */
+                       WARN_ON((err == -EMSGSIZE) && (skb->len == 0));
+
+                       if (err <= 0)
                                goto out;
 
                        nl_dump_check_consistent(cb, nlmsg_hdr(skb));
@@ -1395,7 +1406,8 @@ static int do_set_master(struct net_device *dev, int ifindex)
        return 0;
 }
 
-static int do_setlink(struct net_device *dev, struct ifinfomsg *ifm,
+static int do_setlink(const struct sk_buff *skb,
+                     struct net_device *dev, struct ifinfomsg *ifm,
                      struct nlattr **tb, char *ifname, int modified)
 {
        const struct net_device_ops *ops = dev->netdev_ops;
@@ -1407,7 +1419,7 @@ static int do_setlink(struct net_device *dev, struct ifinfomsg *ifm,
                        err = PTR_ERR(net);
                        goto errout;
                }
-               if (!ns_capable(net->user_ns, CAP_NET_ADMIN)) {
+               if (!netlink_ns_capable(skb, net->user_ns, CAP_NET_ADMIN)) {
                        err = -EPERM;
                        goto errout;
                }
@@ -1661,7 +1673,7 @@ static int rtnl_setlink(struct sk_buff *skb, struct nlmsghdr *nlh)
        if (err < 0)
                goto errout;
 
-       err = do_setlink(dev, ifm, tb, ifname, 0);
+       err = do_setlink(skb, dev, ifm, tb, ifname, 0);
 errout:
        return err;
 }
@@ -1778,7 +1790,8 @@ err:
 }
 EXPORT_SYMBOL(rtnl_create_link);
 
-static int rtnl_group_changelink(struct net *net, int group,
+static int rtnl_group_changelink(const struct sk_buff *skb,
+               struct net *net, int group,
                struct ifinfomsg *ifm,
                struct nlattr **tb)
 {
@@ -1787,7 +1800,7 @@ static int rtnl_group_changelink(struct net *net, int group,
 
        for_each_netdev(net, dev) {
                if (dev->group == group) {
-                       err = do_setlink(dev, ifm, tb, NULL, 0);
+                       err = do_setlink(skb, dev, ifm, tb, NULL, 0);
                        if (err < 0)
                                return err;
                }
@@ -1929,12 +1942,12 @@ replay:
                                modified = 1;
                        }
 
-                       return do_setlink(dev, ifm, tb, ifname, modified);
+                       return do_setlink(skb, dev, ifm, tb, ifname, modified);
                }
 
                if (!(nlh->nlmsg_flags & NLM_F_CREATE)) {
                        if (ifm->ifi_index == 0 && tb[IFLA_GROUP])
-                               return rtnl_group_changelink(net,
+                               return rtnl_group_changelink(skb, net,
                                                nla_get_u32(tb[IFLA_GROUP]),
                                                ifm, tb);
                        return -ENODEV;
@@ -2321,7 +2334,7 @@ static int rtnl_fdb_del(struct sk_buff *skb, struct nlmsghdr *nlh)
        int err = -EINVAL;
        __u8 *addr;
 
-       if (!capable(CAP_NET_ADMIN))
+       if (!netlink_capable(skb, CAP_NET_ADMIN))
                return -EPERM;
 
        err = nlmsg_parse(nlh, sizeof(*ndm), tb, NDA_MAX, NULL);
@@ -2773,7 +2786,7 @@ static int rtnetlink_rcv_msg(struct sk_buff *skb, struct nlmsghdr *nlh)
        sz_idx = type>>2;
        kind = type&3;
 
-       if (kind != 2 && !ns_capable(net->user_ns, CAP_NET_ADMIN))
+       if (kind != 2 && !netlink_net_capable(skb, CAP_NET_ADMIN))
                return -EPERM;
 
        if (kind == 2 && nlh->nlmsg_flags&NLM_F_DUMP) {
index 1b62343f58378b3d8fc0e3ea048dbb45ce1e3a76..3d74530ae82bf6f9005a4594b4fdfcfabc0918c6 100644 (file)
@@ -694,7 +694,7 @@ static void __copy_skb_header(struct sk_buff *new, const struct sk_buff *old)
 #endif
        memcpy(new->cb, old->cb, sizeof(old->cb));
        new->csum               = old->csum;
-       new->local_df           = old->local_df;
+       new->ignore_df          = old->ignore_df;
        new->pkt_type           = old->pkt_type;
        new->ip_summed          = old->ip_summed;
        skb_copy_queue_mapping(new, old);
@@ -3913,7 +3913,7 @@ void skb_scrub_packet(struct sk_buff *skb, bool xnet)
        skb->tstamp.tv64 = 0;
        skb->pkt_type = PACKET_HOST;
        skb->skb_iif = 0;
-       skb->local_df = 0;
+       skb->ignore_df = 0;
        skb_dst_drop(skb);
        skb->mark = 0;
        secpath_reset(skb);
index b4fff008136fafcca363e3a41ef441c2a1be878b..664ee4295b6f6ec38fb4f89d11c52eaa383a15d1 100644 (file)
 static DEFINE_MUTEX(proto_list_mutex);
 static LIST_HEAD(proto_list);
 
+/**
+ * sk_ns_capable - General socket capability test
+ * @sk: Socket to use a capability on or through
+ * @user_ns: The user namespace of the capability to use
+ * @cap: The capability to use
+ *
+ * Test to see if the opener of the socket had the capability @cap
+ * when the socket was created and the current process has the
+ * capability @cap in the user namespace @user_ns.
+ */
+bool sk_ns_capable(const struct sock *sk,
+                  struct user_namespace *user_ns, int cap)
+{
+       return file_ns_capable(sk->sk_socket->file, user_ns, cap) &&
+               ns_capable(user_ns, cap);
+}
+EXPORT_SYMBOL(sk_ns_capable);
+
+/**
+ * sk_capable - Socket global capability test
+ * @sk: Socket to use a capability on or through
+ * @cap: The global capability to use
+ *
+ * Test to see if the opener of the socket had the capability @cap
+ * when the socket was created and the current process has the
+ * capability @cap in all user namespaces.
+ */
+bool sk_capable(const struct sock *sk, int cap)
+{
+       return sk_ns_capable(sk, &init_user_ns, cap);
+}
+EXPORT_SYMBOL(sk_capable);
+
+/**
+ * sk_net_capable - Network namespace socket capability test
+ * @sk: Socket to use a capability on or through
+ * @cap: The capability to use
+ *
+ * Test to see if the opener of the socket had the capability @cap when
+ * the socket was created and the current process has the capability
+ * @cap over the network namespace the socket is a member of.
+ */
+bool sk_net_capable(const struct sock *sk, int cap)
+{
+       return sk_ns_capable(sk, sock_net(sk)->user_ns, cap);
+}
+EXPORT_SYMBOL(sk_net_capable);
+
+
 #ifdef CONFIG_MEMCG_KMEM
 int mem_cgroup_sockets_init(struct mem_cgroup *memcg, struct cgroup_subsys *ss)
 {
index d7af1885932269eb9f4196fe1211a6d09e298b97..a4216a4c95720f105b0cd7841cd59e84d3dda7a6 100644 (file)
@@ -49,7 +49,7 @@ int sock_diag_put_meminfo(struct sock *sk, struct sk_buff *skb, int attrtype)
 }
 EXPORT_SYMBOL_GPL(sock_diag_put_meminfo);
 
-int sock_diag_put_filterinfo(struct user_namespace *user_ns, struct sock *sk,
+int sock_diag_put_filterinfo(bool may_report_filterinfo, struct sock *sk,
                             struct sk_buff *skb, int attrtype)
 {
        struct sock_fprog_kern *fprog;
@@ -58,7 +58,7 @@ int sock_diag_put_filterinfo(struct user_namespace *user_ns, struct sock *sk,
        unsigned int flen;
        int err = 0;
 
-       if (!ns_capable(user_ns, CAP_NET_ADMIN)) {
+       if (!may_report_filterinfo) {
                nla_reserve(skb, attrtype, 0);
                return 0;
        }
diff --git a/net/core/tso.c b/net/core/tso.c
new file mode 100644 (file)
index 0000000..097821d
--- /dev/null
@@ -0,0 +1,72 @@
+#include <net/ip.h>
+#include <net/tso.h>
+
+/* Calculate expected number of TX descriptors */
+int tso_count_descs(struct sk_buff *skb)
+{
+       /* The Marvell Way */
+       return skb_shinfo(skb)->gso_segs * 2 + skb_shinfo(skb)->nr_frags;
+}
+
+void tso_build_hdr(struct sk_buff *skb, char *hdr, struct tso_t *tso,
+                  int size, bool is_last)
+{
+       struct iphdr *iph;
+       struct tcphdr *tcph;
+       int hdr_len = skb_transport_offset(skb) + tcp_hdrlen(skb);
+       int mac_hdr_len = skb_network_offset(skb);
+
+       memcpy(hdr, skb->data, hdr_len);
+       iph = (struct iphdr *)(hdr + mac_hdr_len);
+       iph->id = htons(tso->ip_id);
+       iph->tot_len = htons(size + hdr_len - mac_hdr_len);
+       tcph = (struct tcphdr *)(hdr + skb_transport_offset(skb));
+       tcph->seq = htonl(tso->tcp_seq);
+       tso->ip_id++;
+
+       if (!is_last) {
+               /* Clear all special flags for packets that are not the last */
+               tcph->psh = 0;
+               tcph->fin = 0;
+               tcph->rst = 0;
+       }
+}
+
+void tso_build_data(struct sk_buff *skb, struct tso_t *tso, int size)
+{
+       tso->tcp_seq += size;
+       tso->size -= size;
+       tso->data += size;
+
+       if ((tso->size == 0) &&
+           (tso->next_frag_idx < skb_shinfo(skb)->nr_frags)) {
+               skb_frag_t *frag = &skb_shinfo(skb)->frags[tso->next_frag_idx];
+
+               /* Move to next segment */
+               tso->size = frag->size;
+               tso->data = page_address(frag->page.p) + frag->page_offset;
+               tso->next_frag_idx++;
+       }
+}
+
+void tso_start(struct sk_buff *skb, struct tso_t *tso)
+{
+       int hdr_len = skb_transport_offset(skb) + tcp_hdrlen(skb);
+
+       tso->ip_id = ntohs(ip_hdr(skb)->id);
+       tso->tcp_seq = ntohl(tcp_hdr(skb)->seq);
+       tso->next_frag_idx = 0;
+
+       /* Build first data */
+       tso->size = skb_headlen(skb) - hdr_len;
+       tso->data = skb->data + hdr_len;
+       if ((tso->size == 0) &&
+           (tso->next_frag_idx < skb_shinfo(skb)->nr_frags)) {
+               skb_frag_t *frag = &skb_shinfo(skb)->frags[tso->next_frag_idx];
+
+               /* Move to next segment */
+               tso->size = frag->size;
+               tso->data = page_address(frag->page.p) + frag->page_offset;
+               tso->next_frag_idx++;
+       }
+}
index 553644402670b3461bde7fb26705ddde8729be75..f8b98d89c28527f049b4c3132aa7f0b412cfa0ef 100644 (file)
@@ -1669,7 +1669,7 @@ static int dcb_doit(struct sk_buff *skb, struct nlmsghdr *nlh)
        struct nlmsghdr *reply_nlh = NULL;
        const struct reply_func *fn;
 
-       if ((nlh->nlmsg_type == RTM_SETDCB) && !capable(CAP_NET_ADMIN))
+       if ((nlh->nlmsg_type == RTM_SETDCB) && !netlink_capable(skb, CAP_NET_ADMIN))
                return -EPERM;
 
        ret = nlmsg_parse(nlh, sizeof(*dcb), tb, DCB_ATTR_MAX,
index eb892b4f48144966e47f386108942f51b8b85e50..de2c1e7193057dee2e994386f5bb685e05b47163 100644 (file)
@@ -1084,14 +1084,15 @@ EXPORT_SYMBOL_GPL(dccp_shutdown);
 
 static inline int dccp_mib_init(void)
 {
-       return snmp_mib_init((void __percpu **)dccp_statistics,
-                            sizeof(struct dccp_mib),
-                            __alignof__(struct dccp_mib));
+       dccp_statistics = alloc_percpu(struct dccp_mib);
+       if (!dccp_statistics)
+               return -ENOMEM;
+       return 0;
 }
 
 static inline void dccp_mib_exit(void)
 {
-       snmp_mib_free((void __percpu **)dccp_statistics);
+       free_percpu(dccp_statistics);
 }
 
 static int thash_entries;
index 607ab71b5a0cb3af65067e7d69badb862d5bfb5a..53731e45403c83ba2edf48da01e71aeda48359b0 100644 (file)
@@ -20,6 +20,7 @@
 
 /* Boundary values */
 static int             zero     = 0,
+                       one      = 1,
                        u8_max   = 0xFF;
 static unsigned long   seqw_min = DCCPF_SEQ_WMIN,
                        seqw_max = 0xFFFFFFFF;          /* maximum on 32 bit */
@@ -58,7 +59,7 @@ static struct ctl_table dccp_default_table[] = {
                .maxlen         = sizeof(sysctl_dccp_request_retries),
                .mode           = 0644,
                .proc_handler   = proc_dointvec_minmax,
-               .extra1         = &zero,
+               .extra1         = &one,
                .extra2         = &u8_max,
        },
        {
index 16f0b223102e6619b3b3430a108be740e2e7b373..1cd46a345cb04387a50843a251637b6e3cbd7501 100644 (file)
@@ -280,7 +280,7 @@ static ktime_t dccp_timestamp_seed;
  */
 u32 dccp_timestamp(void)
 {
-       s64 delta = ktime_us_delta(ktime_get_real(), dccp_timestamp_seed);
+       u64 delta = (u64)ktime_us_delta(ktime_get_real(), dccp_timestamp_seed);
 
        do_div(delta, 10);
        return delta;
index a603823a3e279c850d1641e5f4d59be983400488..3b726f31c64c0b88efcfacd4d64df7177260ad5c 100644 (file)
@@ -574,7 +574,7 @@ static int dn_nl_deladdr(struct sk_buff *skb, struct nlmsghdr *nlh)
        struct dn_ifaddr __rcu **ifap;
        int err = -EINVAL;
 
-       if (!capable(CAP_NET_ADMIN))
+       if (!netlink_capable(skb, CAP_NET_ADMIN))
                return -EPERM;
 
        if (!net_eq(net, &init_net))
@@ -618,7 +618,7 @@ static int dn_nl_newaddr(struct sk_buff *skb, struct nlmsghdr *nlh)
        struct dn_ifaddr *ifa;
        int err;
 
-       if (!capable(CAP_NET_ADMIN))
+       if (!netlink_capable(skb, CAP_NET_ADMIN))
                return -EPERM;
 
        if (!net_eq(net, &init_net))
index 57dc159245ecfff38e318626cf0ea1ffa9db1cae..d332aefb0846f86a11d924e3e1e7ad23e279dda2 100644 (file)
@@ -505,7 +505,7 @@ static int dn_fib_rtm_delroute(struct sk_buff *skb, struct nlmsghdr *nlh)
        struct nlattr *attrs[RTA_MAX+1];
        int err;
 
-       if (!capable(CAP_NET_ADMIN))
+       if (!netlink_capable(skb, CAP_NET_ADMIN))
                return -EPERM;
 
        if (!net_eq(net, &init_net))
@@ -530,7 +530,7 @@ static int dn_fib_rtm_newroute(struct sk_buff *skb, struct nlmsghdr *nlh)
        struct nlattr *attrs[RTA_MAX+1];
        int err;
 
-       if (!capable(CAP_NET_ADMIN))
+       if (!netlink_capable(skb, CAP_NET_ADMIN))
                return -EPERM;
 
        if (!net_eq(net, &init_net))
index e83015cecfa7507d551bd19e4b4121ad0f25eeaf..e4d9560a910b0eb96ed3a4ad59d63771f865de3c 100644 (file)
@@ -107,7 +107,7 @@ static inline void dnrmg_receive_user_skb(struct sk_buff *skb)
        if (nlh->nlmsg_len < sizeof(*nlh) || skb->len < nlh->nlmsg_len)
                return;
 
-       if (!capable(CAP_NET_ADMIN))
+       if (!netlink_capable(skb, CAP_NET_ADMIN))
                RCV_SKB_FAIL(-EPERM);
 
        /* Eventually we might send routing messages too */
index 02c0e1716f641c947a5b906ee82c755e568a4a7c..64c5af0a10dd82169ccada3d82baa866a5b82cc8 100644 (file)
@@ -346,7 +346,7 @@ dsa_slave_create(struct dsa_switch *ds, struct device *parent,
                return slave_dev;
 
        slave_dev->features = master->vlan_features;
-       SET_ETHTOOL_OPS(slave_dev, &dsa_slave_ethtool_ops);
+       slave_dev->ethtool_ops = &dsa_slave_ethtool_ops;
        eth_hw_addr_inherit(slave_dev, master);
        slave_dev->tx_queue_len = 0;
 
index 0f5a69ed746d8384891ff9e3fd1aa570f04f91d5..1ae8a5628fb5b5e188926d8ae7e3d5e747fac092 100644 (file)
@@ -92,6 +92,7 @@ static int lowpan_header_create(struct sk_buff *skb,
        const u8 *saddr = _saddr;
        const u8 *daddr = _daddr;
        struct ieee802154_addr sa, da;
+       struct ieee802154_mac_cb *cb = mac_cb_init(skb);
 
        /* TODO:
         * if this package isn't ipv6 one, where should it be routed?
@@ -115,8 +116,7 @@ static int lowpan_header_create(struct sk_buff *skb,
         * from MAC subif of the 'dev' and 'real_dev' network devices, but
         * this isn't implemented in mainline yet, so currently we assign 0xff
         */
-       mac_cb(skb)->flags = IEEE802154_FC_TYPE_DATA;
-       mac_cb(skb)->seq = ieee802154_mlme_ops(dev)->get_dsn(dev);
+       cb->type = IEEE802154_FC_TYPE_DATA;
 
        /* prepare wpan address data */
        sa.mode = IEEE802154_ADDR_LONG;
@@ -135,11 +135,10 @@ static int lowpan_header_create(struct sk_buff *skb,
        } else {
                da.mode = IEEE802154_ADDR_LONG;
                da.extended_addr = ieee802154_devaddr_from_raw(daddr);
-
-               /* request acknowledgment */
-               mac_cb(skb)->flags |= MAC_CB_FLAG_ACKREQ;
        }
 
+       cb->ackreq = !lowpan_is_addr_broadcast(daddr);
+
        return dev_hard_header(skb, lowpan_dev_info(dev)->real_dev,
                        type, (void *)&da, (void *)&sa, 0);
 }
@@ -221,139 +220,149 @@ static int lowpan_set_address(struct net_device *dev, void *p)
        return 0;
 }
 
-static int
-lowpan_fragment_xmit(struct sk_buff *skb, u8 *head,
-                    int mlen, int plen, int offset, int type)
+static struct sk_buff*
+lowpan_alloc_frag(struct sk_buff *skb, int size,
+                 const struct ieee802154_hdr *master_hdr)
 {
+       struct net_device *real_dev = lowpan_dev_info(skb->dev)->real_dev;
        struct sk_buff *frag;
-       int hlen;
-
-       hlen = (type == LOWPAN_DISPATCH_FRAG1) ?
-                       LOWPAN_FRAG1_HEAD_SIZE : LOWPAN_FRAGN_HEAD_SIZE;
-
-       raw_dump_inline(__func__, "6lowpan fragment header", head, hlen);
+       int rc;
+
+       frag = alloc_skb(real_dev->hard_header_len +
+                        real_dev->needed_tailroom + size,
+                        GFP_ATOMIC);
+
+       if (likely(frag)) {
+               frag->dev = real_dev;
+               frag->priority = skb->priority;
+               skb_reserve(frag, real_dev->hard_header_len);
+               skb_reset_network_header(frag);
+               *mac_cb(frag) = *mac_cb(skb);
+
+               rc = dev_hard_header(frag, real_dev, 0, &master_hdr->dest,
+                                    &master_hdr->source, size);
+               if (rc < 0) {
+                       kfree_skb(frag);
+                       return ERR_PTR(-rc);
+               }
+       } else {
+               frag = ERR_PTR(ENOMEM);
+       }
 
-       frag = netdev_alloc_skb(skb->dev,
-                               hlen + mlen + plen + IEEE802154_MFR_SIZE);
-       if (!frag)
-               return -ENOMEM;
+       return frag;
+}
 
-       frag->priority = skb->priority;
+static int
+lowpan_xmit_fragment(struct sk_buff *skb, const struct ieee802154_hdr *wpan_hdr,
+                    u8 *frag_hdr, int frag_hdrlen,
+                    int offset, int len)
+{
+       struct sk_buff *frag;
 
-       /* copy header, MFR and payload */
-       skb_put(frag, mlen);
-       skb_copy_to_linear_data(frag, skb_mac_header(skb), mlen);
+       raw_dump_inline(__func__, " fragment header", frag_hdr, frag_hdrlen);
 
-       skb_put(frag, hlen);
-       skb_copy_to_linear_data_offset(frag, mlen, head, hlen);
+       frag = lowpan_alloc_frag(skb, frag_hdrlen + len, wpan_hdr);
+       if (IS_ERR(frag))
+               return -PTR_ERR(frag);
 
-       skb_put(frag, plen);
-       skb_copy_to_linear_data_offset(frag, mlen + hlen,
-                                      skb_network_header(skb) + offset, plen);
+       memcpy(skb_put(frag, frag_hdrlen), frag_hdr, frag_hdrlen);
+       memcpy(skb_put(frag, len), skb_network_header(skb) + offset, len);
 
-       raw_dump_table(__func__, " raw fragment dump", frag->data, frag->len);
+       raw_dump_table(__func__, " fragment dump", frag->data, frag->len);
 
        return dev_queue_xmit(frag);
 }
 
 static int
-lowpan_skb_fragmentation(struct sk_buff *skb, struct net_device *dev)
+lowpan_xmit_fragmented(struct sk_buff *skb, struct net_device *dev,
+                      const struct ieee802154_hdr *wpan_hdr)
 {
-       int err;
-       u16 dgram_offset, dgram_size, payload_length, header_length,
-           lowpan_size, frag_plen, offset;
-       __be16 tag;
-       u8 head[5];
-
-       header_length = skb->mac_len;
-       payload_length = skb->len - header_length;
-       tag = lowpan_dev_info(dev)->fragment_tag++;
-       lowpan_size = skb_network_header_len(skb);
+       u16 dgram_size, dgram_offset;
+       __be16 frag_tag;
+       u8 frag_hdr[5];
+       int frag_cap, frag_len, payload_cap, rc;
+       int skb_unprocessed, skb_offset;
+
        dgram_size = lowpan_uncompress_size(skb, &dgram_offset) -
-                    header_length;
+                    skb->mac_len;
+       frag_tag = lowpan_dev_info(dev)->fragment_tag++;
 
-       /* first fragment header */
-       head[0] = LOWPAN_DISPATCH_FRAG1 | ((dgram_size >> 8) & 0x7);
-       head[1] = dgram_size & 0xff;
-       memcpy(head + 2, &tag, sizeof(tag));
+       frag_hdr[0] = LOWPAN_DISPATCH_FRAG1 | ((dgram_size >> 8) & 0x07);
+       frag_hdr[1] = dgram_size & 0xff;
+       memcpy(frag_hdr + 2, &frag_tag, sizeof(frag_tag));
 
-       /* calc the nearest payload length(divided to 8) for first fragment
-        * which fits into a IEEE802154_MTU
-        */
-       frag_plen = round_down(IEEE802154_MTU - header_length -
-                              LOWPAN_FRAG1_HEAD_SIZE - lowpan_size -
-                              IEEE802154_MFR_SIZE, 8);
-
-       err = lowpan_fragment_xmit(skb, head, header_length,
-                                  frag_plen + lowpan_size, 0,
-                                  LOWPAN_DISPATCH_FRAG1);
-       if (err) {
-               pr_debug("%s unable to send FRAG1 packet (tag: %d)",
-                        __func__, tag);
-               goto exit;
-       }
+       payload_cap = ieee802154_max_payload(wpan_hdr);
 
-       offset = lowpan_size + frag_plen;
-       dgram_offset += frag_plen;
+       frag_len = round_down(payload_cap - LOWPAN_FRAG1_HEAD_SIZE -
+                             skb_network_header_len(skb), 8);
 
-       /* next fragment header */
-       head[0] &= ~LOWPAN_DISPATCH_FRAG1;
-       head[0] |= LOWPAN_DISPATCH_FRAGN;
+       skb_offset = skb_network_header_len(skb);
+       skb_unprocessed = skb->len - skb->mac_len - skb_offset;
 
-       frag_plen = round_down(IEEE802154_MTU - header_length -
-                              LOWPAN_FRAGN_HEAD_SIZE - IEEE802154_MFR_SIZE, 8);
+       rc = lowpan_xmit_fragment(skb, wpan_hdr, frag_hdr,
+                                 LOWPAN_FRAG1_HEAD_SIZE, 0,
+                                 frag_len + skb_network_header_len(skb));
+       if (rc) {
+               pr_debug("%s unable to send FRAG1 packet (tag: %d)",
+                        __func__, frag_tag);
+               goto err;
+       }
 
-       while (payload_length - offset > 0) {
-               int len = frag_plen;
+       frag_hdr[0] &= ~LOWPAN_DISPATCH_FRAG1;
+       frag_hdr[0] |= LOWPAN_DISPATCH_FRAGN;
+       frag_cap = round_down(payload_cap - LOWPAN_FRAGN_HEAD_SIZE, 8);
 
-               head[4] = dgram_offset >> 3;
+       while (skb_unprocessed >= frag_cap) {
+               dgram_offset += frag_len;
+               skb_offset += frag_len;
+               skb_unprocessed -= frag_len;
+               frag_len = min(frag_cap, skb_unprocessed);
 
-               if (payload_length - offset < len)
-                       len = payload_length - offset;
+               frag_hdr[4] = dgram_offset >> 3;
 
-               err = lowpan_fragment_xmit(skb, head, header_length, len,
-                                          offset, LOWPAN_DISPATCH_FRAGN);
-               if (err) {
+               rc = lowpan_xmit_fragment(skb, wpan_hdr, frag_hdr,
+                                         LOWPAN_FRAGN_HEAD_SIZE, skb_offset,
+                                         frag_len);
+               if (rc) {
                        pr_debug("%s unable to send a FRAGN packet. (tag: %d, offset: %d)\n",
-                                __func__, tag, offset);
-                       goto exit;
+                                __func__, frag_tag, skb_offset);
+                       goto err;
                }
-
-               offset += len;
-               dgram_offset += len;
        }
 
-exit:
-       return err;
+       consume_skb(skb);
+       return NET_XMIT_SUCCESS;
+
+err:
+       kfree_skb(skb);
+       return rc;
 }
 
 static netdev_tx_t lowpan_xmit(struct sk_buff *skb, struct net_device *dev)
 {
-       int err = -1;
+       struct ieee802154_hdr wpan_hdr;
+       int max_single;
 
        pr_debug("package xmit\n");
 
-       skb->dev = lowpan_dev_info(dev)->real_dev;
-       if (skb->dev == NULL) {
-               pr_debug("ERROR: no real wpan device found\n");
-               goto error;
+       if (ieee802154_hdr_peek(skb, &wpan_hdr) < 0) {
+               kfree_skb(skb);
+               return NET_XMIT_DROP;
        }
 
-       /* Send directly if less than the MTU minus the 2 checksum bytes. */
-       if (skb->len <= IEEE802154_MTU - IEEE802154_MFR_SIZE) {
-               err = dev_queue_xmit(skb);
-               goto out;
-       }
+       max_single = ieee802154_max_payload(&wpan_hdr);
 
-       pr_debug("frame is too big, fragmentation is needed\n");
-       err = lowpan_skb_fragmentation(skb, dev);
-error:
-       dev_kfree_skb(skb);
-out:
-       if (err)
-               pr_debug("ERROR: xmit failed\n");
+       if (skb_tail_pointer(skb) - skb_network_header(skb) <= max_single) {
+               skb->dev = lowpan_dev_info(dev)->real_dev;
+               return dev_queue_xmit(skb);
+       } else {
+               netdev_tx_t rc;
+
+               pr_debug("frame is too big, fragmentation is needed\n");
+               rc = lowpan_xmit_fragmented(skb, dev, &wpan_hdr);
 
-       return (err < 0) ? NET_XMIT_DROP : err;
+               return rc < 0 ? NET_XMIT_DROP : rc;
+       }
 }
 
 static struct wpan_phy *lowpan_get_phy(const struct net_device *dev)
index 786437bc0c08531785d3f5fa1deb5bff8efe9e62..4f0ed8780194502465f0d5b60383bf6794bfadf0 100644 (file)
@@ -21,6 +21,7 @@
  * Dmitry Eremin-Solenikov <dbaryshkov@gmail.com>
  */
 
+#include <linux/capability.h>
 #include <linux/net.h>
 #include <linux/module.h>
 #include <linux/if_arp.h>
@@ -45,7 +46,12 @@ struct dgram_sock {
        struct ieee802154_addr dst_addr;
 
        unsigned int bound:1;
+       unsigned int connected:1;
        unsigned int want_ack:1;
+       unsigned int secen:1;
+       unsigned int secen_override:1;
+       unsigned int seclevel:3;
+       unsigned int seclevel_override:1;
 };
 
 static inline struct dgram_sock *dgram_sk(const struct sock *sk)
@@ -73,10 +79,7 @@ static int dgram_init(struct sock *sk)
 {
        struct dgram_sock *ro = dgram_sk(sk);
 
-       ro->dst_addr.mode = IEEE802154_ADDR_LONG;
-       ro->dst_addr.pan_id = cpu_to_le16(IEEE802154_PANID_BROADCAST);
        ro->want_ack = 1;
-       memset(&ro->dst_addr.extended_addr, 0xff, IEEE802154_ADDR_LEN);
        return 0;
 }
 
@@ -183,6 +186,7 @@ static int dgram_connect(struct sock *sk, struct sockaddr *uaddr,
        }
 
        ieee802154_addr_from_sa(&ro->dst_addr, &addr->addr);
+       ro->connected = 1;
 
 out:
        release_sock(sk);
@@ -194,10 +198,7 @@ static int dgram_disconnect(struct sock *sk, int flags)
        struct dgram_sock *ro = dgram_sk(sk);
 
        lock_sock(sk);
-
-       ro->dst_addr.mode = IEEE802154_ADDR_LONG;
-       memset(&ro->dst_addr.extended_addr, 0xff, IEEE802154_ADDR_LEN);
-
+       ro->connected = 0;
        release_sock(sk);
 
        return 0;
@@ -209,7 +210,9 @@ static int dgram_sendmsg(struct kiocb *iocb, struct sock *sk,
        struct net_device *dev;
        unsigned int mtu;
        struct sk_buff *skb;
+       struct ieee802154_mac_cb *cb;
        struct dgram_sock *ro = dgram_sk(sk);
+       struct ieee802154_addr dst_addr;
        int hlen, tlen;
        int err;
 
@@ -218,6 +221,11 @@ static int dgram_sendmsg(struct kiocb *iocb, struct sock *sk,
                return -EOPNOTSUPP;
        }
 
+       if (!ro->connected && !msg->msg_name)
+               return -EDESTADDRREQ;
+       else if (ro->connected && msg->msg_name)
+               return -EISCONN;
+
        if (!ro->bound)
                dev = dev_getfirstbyhwtype(sock_net(sk), ARPHRD_IEEE802154);
        else
@@ -249,18 +257,28 @@ static int dgram_sendmsg(struct kiocb *iocb, struct sock *sk,
 
        skb_reset_network_header(skb);
 
-       mac_cb(skb)->flags = IEEE802154_FC_TYPE_DATA;
-       if (ro->want_ack)
-               mac_cb(skb)->flags |= MAC_CB_FLAG_ACKREQ;
+       cb = mac_cb_init(skb);
+       cb->type = IEEE802154_FC_TYPE_DATA;
+       cb->ackreq = ro->want_ack;
+
+       if (msg->msg_name) {
+               DECLARE_SOCKADDR(struct sockaddr_ieee802154*, daddr, msg->msg_name);
 
-       mac_cb(skb)->seq = ieee802154_mlme_ops(dev)->get_dsn(dev);
-       err = dev_hard_header(skb, dev, ETH_P_IEEE802154, &ro->dst_addr,
-                       ro->bound ? &ro->src_addr : NULL, size);
+               ieee802154_addr_from_sa(&dst_addr, &daddr->addr);
+       } else {
+               dst_addr = ro->dst_addr;
+       }
+
+       cb->secen = ro->secen;
+       cb->secen_override = ro->secen_override;
+       cb->seclevel = ro->seclevel;
+       cb->seclevel_override = ro->seclevel_override;
+
+       err = dev_hard_header(skb, dev, ETH_P_IEEE802154, &dst_addr,
+                             ro->bound ? &ro->src_addr : NULL, size);
        if (err < 0)
                goto out_skb;
 
-       skb_reset_mac_header(skb);
-
        err = memcpy_fromiovec(skb_put(skb, size), msg->msg_iov, size);
        if (err < 0)
                goto out_skb;
@@ -419,6 +437,20 @@ static int dgram_getsockopt(struct sock *sk, int level, int optname,
        case WPAN_WANTACK:
                val = ro->want_ack;
                break;
+       case WPAN_SECURITY:
+               if (!ro->secen_override)
+                       val = WPAN_SECURITY_DEFAULT;
+               else if (ro->secen)
+                       val = WPAN_SECURITY_ON;
+               else
+                       val = WPAN_SECURITY_OFF;
+               break;
+       case WPAN_SECURITY_LEVEL:
+               if (!ro->seclevel_override)
+                       val = WPAN_SECURITY_LEVEL_DEFAULT;
+               else
+                       val = ro->seclevel;
+               break;
        default:
                return -ENOPROTOOPT;
        }
@@ -434,6 +466,7 @@ static int dgram_setsockopt(struct sock *sk, int level, int optname,
                    char __user *optval, unsigned int optlen)
 {
        struct dgram_sock *ro = dgram_sk(sk);
+       struct net *net = sock_net(sk);
        int val;
        int err = 0;
 
@@ -449,6 +482,47 @@ static int dgram_setsockopt(struct sock *sk, int level, int optname,
        case WPAN_WANTACK:
                ro->want_ack = !!val;
                break;
+       case WPAN_SECURITY:
+               if (!ns_capable(net->user_ns, CAP_NET_ADMIN) &&
+                   !ns_capable(net->user_ns, CAP_NET_RAW)) {
+                       err = -EPERM;
+                       break;
+               }
+
+               switch (val) {
+               case WPAN_SECURITY_DEFAULT:
+                       ro->secen_override = 0;
+                       break;
+               case WPAN_SECURITY_ON:
+                       ro->secen_override = 1;
+                       ro->secen = 1;
+                       break;
+               case WPAN_SECURITY_OFF:
+                       ro->secen_override = 1;
+                       ro->secen = 0;
+                       break;
+               default:
+                       err = -EINVAL;
+                       break;
+               }
+               break;
+       case WPAN_SECURITY_LEVEL:
+               if (!ns_capable(net->user_ns, CAP_NET_ADMIN) &&
+                   !ns_capable(net->user_ns, CAP_NET_RAW)) {
+                       err = -EPERM;
+                       break;
+               }
+
+               if (val < WPAN_SECURITY_LEVEL_DEFAULT ||
+                   val > IEEE802154_SCF_SECLEVEL_ENC_MIC128) {
+                       err = -EINVAL;
+               } else if (val == WPAN_SECURITY_LEVEL_DEFAULT) {
+                       ro->seclevel_override = 0;
+               } else {
+                       ro->seclevel_override = 1;
+                       ro->seclevel = val;
+               }
+               break;
        default:
                err = -ENOPROTOOPT;
                break;
index bed42a48408c6cc71cf4a47ba5bd897603cfaf7b..c09294e39ca60326d5b40c8431bf202ce5559225 100644 (file)
@@ -195,15 +195,16 @@ ieee802154_hdr_get_sechdr(const u8 *buf, struct ieee802154_sechdr *hdr)
        return pos;
 }
 
+static int ieee802154_sechdr_lengths[4] = {
+       [IEEE802154_SCF_KEY_IMPLICIT] = 5,
+       [IEEE802154_SCF_KEY_INDEX] = 6,
+       [IEEE802154_SCF_KEY_SHORT_INDEX] = 10,
+       [IEEE802154_SCF_KEY_HW_INDEX] = 14,
+};
+
 static int ieee802154_hdr_sechdr_len(u8 sc)
 {
-       switch (IEEE802154_SCF_KEY_ID_MODE(sc)) {
-       case IEEE802154_SCF_KEY_IMPLICIT: return 5;
-       case IEEE802154_SCF_KEY_INDEX: return 6;
-       case IEEE802154_SCF_KEY_SHORT_INDEX: return 10;
-       case IEEE802154_SCF_KEY_HW_INDEX: return 14;
-       default: return -EINVAL;
-       }
+       return ieee802154_sechdr_lengths[IEEE802154_SCF_KEY_ID_MODE(sc)];
 }
 
 static int ieee802154_hdr_minlen(const struct ieee802154_hdr *hdr)
@@ -285,3 +286,40 @@ ieee802154_hdr_peek_addrs(const struct sk_buff *skb, struct ieee802154_hdr *hdr)
        return pos;
 }
 EXPORT_SYMBOL_GPL(ieee802154_hdr_peek_addrs);
+
/* Like ieee802154_hdr_peek_addrs(), but if the frame control says
 * security is enabled, additionally parse the auxiliary security
 * header into @hdr->sec.
 *
 * Returns the total parsed header length, or -EINVAL if the frame is
 * malformed or truncated.
 */
int
ieee802154_hdr_peek(const struct sk_buff *skb, struct ieee802154_hdr *hdr)
{
	const u8 *buf = skb_mac_header(skb);
	int pos;

	pos = ieee802154_hdr_peek_addrs(skb, hdr);
	if (pos < 0)
		return -EINVAL;

	if (hdr->fc.security_enabled) {
		/* first byte after the addresses is the security control */
		u8 key_id_mode = IEEE802154_SCF_KEY_ID_MODE(*(buf + pos));
		int want = pos + ieee802154_sechdr_lengths[key_id_mode];

		/* whole sechdr must lie within the skb's linear data */
		if (buf + want > skb_tail_pointer(skb))
			return -EINVAL;

		pos += ieee802154_hdr_get_sechdr(buf + pos, &hdr->sec);
	}

	return pos;
}
EXPORT_SYMBOL_GPL(ieee802154_hdr_peek);
+
/* Maximum MAC payload for a frame with header @hdr: the MTU minus the
 * full MAC header (including any auxiliary security header and its
 * authentication tag) and the MAC footer.
 */
int ieee802154_max_payload(const struct ieee802154_hdr *hdr)
{
	int hlen = ieee802154_hdr_minlen(hdr);

	if (hdr->fc.security_enabled) {
		/* NOTE(review): the "- 1" presumably compensates for a
		 * byte already counted by ieee802154_hdr_minlen() —
		 * confirm against that function's computation.
		 */
		hlen += ieee802154_sechdr_lengths[hdr->sec.key_id_mode] - 1;
		hlen += ieee802154_sechdr_authtag_len(&hdr->sec);
	}

	return IEEE802154_MTU - hlen - IEEE802154_MFR_SIZE;
}
EXPORT_SYMBOL_GPL(ieee802154_max_payload);
index 6693a5cf01ce5e5fc39fcd33a64547dc4bdb5748..8b83a231299e46a0668b3fe329803fa1a6154791 100644 (file)
@@ -68,4 +68,23 @@ int ieee802154_list_iface(struct sk_buff *skb, struct genl_info *info);
 int ieee802154_dump_iface(struct sk_buff *skb, struct netlink_callback *cb);
 int ieee802154_set_macparams(struct sk_buff *skb, struct genl_info *info);
 
+int ieee802154_llsec_getparams(struct sk_buff *skb, struct genl_info *info);
+int ieee802154_llsec_setparams(struct sk_buff *skb, struct genl_info *info);
+int ieee802154_llsec_add_key(struct sk_buff *skb, struct genl_info *info);
+int ieee802154_llsec_del_key(struct sk_buff *skb, struct genl_info *info);
+int ieee802154_llsec_dump_keys(struct sk_buff *skb,
+                              struct netlink_callback *cb);
+int ieee802154_llsec_add_dev(struct sk_buff *skb, struct genl_info *info);
+int ieee802154_llsec_del_dev(struct sk_buff *skb, struct genl_info *info);
+int ieee802154_llsec_dump_devs(struct sk_buff *skb,
+                              struct netlink_callback *cb);
+int ieee802154_llsec_add_devkey(struct sk_buff *skb, struct genl_info *info);
+int ieee802154_llsec_del_devkey(struct sk_buff *skb, struct genl_info *info);
+int ieee802154_llsec_dump_devkeys(struct sk_buff *skb,
+                                 struct netlink_callback *cb);
+int ieee802154_llsec_add_seclevel(struct sk_buff *skb, struct genl_info *info);
+int ieee802154_llsec_del_seclevel(struct sk_buff *skb, struct genl_info *info);
+int ieee802154_llsec_dump_seclevels(struct sk_buff *skb,
+                                   struct netlink_callback *cb);
+
 #endif
index 04b20589d97ab91eeb203ba0527122184936dc06..26efcf4fd2ff72079a678ef3a4dbd0ae887848e1 100644 (file)
@@ -124,6 +124,26 @@ static const struct genl_ops ieee8021154_ops[] = {
        IEEE802154_DUMP(IEEE802154_LIST_IFACE, ieee802154_list_iface,
                        ieee802154_dump_iface),
        IEEE802154_OP(IEEE802154_SET_MACPARAMS, ieee802154_set_macparams),
+       IEEE802154_OP(IEEE802154_LLSEC_GETPARAMS, ieee802154_llsec_getparams),
+       IEEE802154_OP(IEEE802154_LLSEC_SETPARAMS, ieee802154_llsec_setparams),
+       IEEE802154_DUMP(IEEE802154_LLSEC_LIST_KEY, NULL,
+                       ieee802154_llsec_dump_keys),
+       IEEE802154_OP(IEEE802154_LLSEC_ADD_KEY, ieee802154_llsec_add_key),
+       IEEE802154_OP(IEEE802154_LLSEC_DEL_KEY, ieee802154_llsec_del_key),
+       IEEE802154_DUMP(IEEE802154_LLSEC_LIST_DEV, NULL,
+                       ieee802154_llsec_dump_devs),
+       IEEE802154_OP(IEEE802154_LLSEC_ADD_DEV, ieee802154_llsec_add_dev),
+       IEEE802154_OP(IEEE802154_LLSEC_DEL_DEV, ieee802154_llsec_del_dev),
+       IEEE802154_DUMP(IEEE802154_LLSEC_LIST_DEVKEY, NULL,
+                       ieee802154_llsec_dump_devkeys),
+       IEEE802154_OP(IEEE802154_LLSEC_ADD_DEVKEY, ieee802154_llsec_add_devkey),
+       IEEE802154_OP(IEEE802154_LLSEC_DEL_DEVKEY, ieee802154_llsec_del_devkey),
+       IEEE802154_DUMP(IEEE802154_LLSEC_LIST_SECLEVEL, NULL,
+                       ieee802154_llsec_dump_seclevels),
+       IEEE802154_OP(IEEE802154_LLSEC_ADD_SECLEVEL,
+                     ieee802154_llsec_add_seclevel),
+       IEEE802154_OP(IEEE802154_LLSEC_DEL_SECLEVEL,
+                     ieee802154_llsec_del_seclevel),
 };
 
 static const struct genl_multicast_group ieee802154_mcgrps[] = {
index 5d285498c0f691a906de5d9c82c64f867a857418..a3281b8bfd5bf1fa24bd05a351b476031776797f 100644 (file)
@@ -715,3 +715,812 @@ out:
        dev_put(dev);
        return rc;
 }
+
+
+
+static int
+ieee802154_llsec_parse_key_id(struct genl_info *info,
+                             struct ieee802154_llsec_key_id *desc)
+{
+       memset(desc, 0, sizeof(*desc));
+
+       if (!info->attrs[IEEE802154_ATTR_LLSEC_KEY_MODE])
+               return -EINVAL;
+
+       desc->mode = nla_get_u8(info->attrs[IEEE802154_ATTR_LLSEC_KEY_MODE]);
+
+       if (desc->mode == IEEE802154_SCF_KEY_IMPLICIT) {
+               if (!info->attrs[IEEE802154_ATTR_PAN_ID] &&
+                   !(info->attrs[IEEE802154_ATTR_SHORT_ADDR] ||
+                     info->attrs[IEEE802154_ATTR_HW_ADDR]))
+                       return -EINVAL;
+
+               desc->device_addr.pan_id = nla_get_shortaddr(info->attrs[IEEE802154_ATTR_PAN_ID]);
+
+               if (info->attrs[IEEE802154_ATTR_SHORT_ADDR]) {
+                       desc->device_addr.mode = IEEE802154_ADDR_SHORT;
+                       desc->device_addr.short_addr = nla_get_shortaddr(info->attrs[IEEE802154_ATTR_SHORT_ADDR]);
+               } else {
+                       desc->device_addr.mode = IEEE802154_ADDR_LONG;
+                       desc->device_addr.extended_addr = nla_get_hwaddr(info->attrs[IEEE802154_ATTR_HW_ADDR]);
+               }
+       }
+
+       if (desc->mode != IEEE802154_SCF_KEY_IMPLICIT &&
+           !info->attrs[IEEE802154_ATTR_LLSEC_KEY_ID])
+               return -EINVAL;
+
+       if (desc->mode == IEEE802154_SCF_KEY_SHORT_INDEX &&
+           !info->attrs[IEEE802154_ATTR_LLSEC_KEY_SOURCE_SHORT])
+               return -EINVAL;
+
+       if (desc->mode == IEEE802154_SCF_KEY_HW_INDEX &&
+           !info->attrs[IEEE802154_ATTR_LLSEC_KEY_SOURCE_EXTENDED])
+               return -EINVAL;
+
+       if (desc->mode != IEEE802154_SCF_KEY_IMPLICIT)
+               desc->id = nla_get_u8(info->attrs[IEEE802154_ATTR_LLSEC_KEY_ID]);
+
+       switch (desc->mode) {
+       case IEEE802154_SCF_KEY_SHORT_INDEX:
+       {
+               u32 source = nla_get_u32(info->attrs[IEEE802154_ATTR_LLSEC_KEY_SOURCE_SHORT]);
+               desc->short_source = cpu_to_le32(source);
+               break;
+       }
+       case IEEE802154_SCF_KEY_HW_INDEX:
+               desc->extended_source = nla_get_hwaddr(info->attrs[IEEE802154_ATTR_LLSEC_KEY_SOURCE_EXTENDED]);
+               break;
+       }
+
+       return 0;
+}
+
/* Serialize an llsec key id into netlink attributes; the inverse of
 * ieee802154_llsec_parse_key_id().  Implicit keys emit the device
 * address; indexed modes emit the key id byte plus, for the short/hw
 * source modes, the matching key source attribute.
 *
 * Returns 0 on success or -EMSGSIZE if @msg is full.
 */
static int
ieee802154_llsec_fill_key_id(struct sk_buff *msg,
			     const struct ieee802154_llsec_key_id *desc)
{
	if (nla_put_u8(msg, IEEE802154_ATTR_LLSEC_KEY_MODE, desc->mode))
		return -EMSGSIZE;

	if (desc->mode == IEEE802154_SCF_KEY_IMPLICIT) {
		if (nla_put_shortaddr(msg, IEEE802154_ATTR_PAN_ID,
				      desc->device_addr.pan_id))
			return -EMSGSIZE;

		if (desc->device_addr.mode == IEEE802154_ADDR_SHORT &&
		    nla_put_shortaddr(msg, IEEE802154_ATTR_SHORT_ADDR,
				      desc->device_addr.short_addr))
			return -EMSGSIZE;

		if (desc->device_addr.mode == IEEE802154_ADDR_LONG &&
		    nla_put_hwaddr(msg, IEEE802154_ATTR_HW_ADDR,
				   desc->device_addr.extended_addr))
			return -EMSGSIZE;
	}

	if (desc->mode != IEEE802154_SCF_KEY_IMPLICIT &&
	    nla_put_u8(msg, IEEE802154_ATTR_LLSEC_KEY_ID, desc->id))
		return -EMSGSIZE;

	if (desc->mode == IEEE802154_SCF_KEY_SHORT_INDEX &&
	    nla_put_u32(msg, IEEE802154_ATTR_LLSEC_KEY_SOURCE_SHORT,
			le32_to_cpu(desc->short_source)))
		return -EMSGSIZE;

	if (desc->mode == IEEE802154_SCF_KEY_HW_INDEX &&
	    nla_put_hwaddr(msg, IEEE802154_ATTR_LLSEC_KEY_SOURCE_EXTENDED,
			   desc->extended_source))
		return -EMSGSIZE;

	return 0;
}
+
+int ieee802154_llsec_getparams(struct sk_buff *skb, struct genl_info *info)
+{
+       struct sk_buff *msg;
+       struct net_device *dev = NULL;
+       int rc = -ENOBUFS;
+       struct ieee802154_mlme_ops *ops;
+       void *hdr;
+       struct ieee802154_llsec_params params;
+
+       pr_debug("%s\n", __func__);
+
+       dev = ieee802154_nl_get_dev(info);
+       if (!dev)
+               return -ENODEV;
+
+       ops = ieee802154_mlme_ops(dev);
+       if (!ops->llsec) {
+               rc = -EOPNOTSUPP;
+               goto out_dev;
+       }
+
+       msg = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL);
+       if (!msg)
+               goto out_dev;
+
+       hdr = genlmsg_put(msg, 0, info->snd_seq, &nl802154_family, 0,
+               IEEE802154_LLSEC_GETPARAMS);
+       if (!hdr)
+               goto out_free;
+
+       rc = ops->llsec->get_params(dev, &params);
+       if (rc < 0)
+               goto out_free;
+
+       if (nla_put_string(msg, IEEE802154_ATTR_DEV_NAME, dev->name) ||
+           nla_put_u32(msg, IEEE802154_ATTR_DEV_INDEX, dev->ifindex) ||
+           nla_put_u8(msg, IEEE802154_ATTR_LLSEC_ENABLED, params.enabled) ||
+           nla_put_u8(msg, IEEE802154_ATTR_LLSEC_SECLEVEL, params.out_level) ||
+           nla_put_u32(msg, IEEE802154_ATTR_LLSEC_FRAME_COUNTER,
+                       be32_to_cpu(params.frame_counter)) ||
+           ieee802154_llsec_fill_key_id(msg, &params.out_key))
+               goto out_free;
+
+       dev_put(dev);
+
+       return ieee802154_nl_reply(msg, info);
+out_free:
+       nlmsg_free(msg);
+out_dev:
+       dev_put(dev);
+       return rc;
+}
+
+int ieee802154_llsec_setparams(struct sk_buff *skb, struct genl_info *info)
+{
+       struct net_device *dev = NULL;
+       int rc = -EINVAL;
+       struct ieee802154_mlme_ops *ops;
+       struct ieee802154_llsec_params params;
+       int changed = 0;
+
+       pr_debug("%s\n", __func__);
+
+       dev = ieee802154_nl_get_dev(info);
+       if (!dev)
+               return -ENODEV;
+
+       if (!info->attrs[IEEE802154_ATTR_LLSEC_ENABLED] &&
+           !info->attrs[IEEE802154_ATTR_LLSEC_KEY_MODE] &&
+           !info->attrs[IEEE802154_ATTR_LLSEC_SECLEVEL])
+               goto out;
+
+       ops = ieee802154_mlme_ops(dev);
+       if (!ops->llsec) {
+               rc = -EOPNOTSUPP;
+               goto out;
+       }
+
+       if (info->attrs[IEEE802154_ATTR_LLSEC_SECLEVEL] &&
+           nla_get_u8(info->attrs[IEEE802154_ATTR_LLSEC_SECLEVEL]) > 7)
+               goto out;
+
+       if (info->attrs[IEEE802154_ATTR_LLSEC_ENABLED]) {
+               params.enabled = nla_get_u8(info->attrs[IEEE802154_ATTR_LLSEC_ENABLED]);
+               changed |= IEEE802154_LLSEC_PARAM_ENABLED;
+       }
+
+       if (info->attrs[IEEE802154_ATTR_LLSEC_KEY_MODE]) {
+               if (ieee802154_llsec_parse_key_id(info, &params.out_key))
+                       goto out;
+
+               changed |= IEEE802154_LLSEC_PARAM_OUT_KEY;
+       }
+
+       if (info->attrs[IEEE802154_ATTR_LLSEC_SECLEVEL]) {
+               params.out_level = nla_get_u8(info->attrs[IEEE802154_ATTR_LLSEC_SECLEVEL]);
+               changed |= IEEE802154_LLSEC_PARAM_OUT_LEVEL;
+       }
+
+       if (info->attrs[IEEE802154_ATTR_LLSEC_FRAME_COUNTER]) {
+               u32 fc = nla_get_u32(info->attrs[IEEE802154_ATTR_LLSEC_FRAME_COUNTER]);
+
+               params.frame_counter = cpu_to_be32(fc);
+               changed |= IEEE802154_LLSEC_PARAM_FRAME_COUNTER;
+       }
+
+       rc = ops->llsec->set_params(dev, &params, changed);
+
+       dev_put(dev);
+
+       return rc;
+out:
+       dev_put(dev);
+       return rc;
+}
+
+
+
/* State shared between the generic llsec table dumper and the
 * per-entry iteration callbacks.
 */
struct llsec_dump_data {
	struct sk_buff *skb;	/* dump message under construction */
	int s_idx, s_idx2;	/* resume positions (outer/inner list) */
	int portid;
	int nlmsg_seq;
	struct net_device *dev;	/* device currently being dumped */
	struct ieee802154_mlme_ops *ops;
	struct ieee802154_llsec_table *table;	/* locked while step() runs */
};
+
/* Generic llsec table dump: walk every IEEE 802.15.4 netdev in the
 * netns and, for each that provides llsec ops, lock its table and run
 * @step over it.  cb->args[0] is the device resume index;
 * cb->args[1]/[2] seed the per-table resume positions.
 *
 * NOTE(review): data.s_idx/s_idx2 are advanced by the step callbacks
 * but never written back into cb->args[1]/[2] — verify that
 * multi-part dumps resume at the right table entry.
 */
static int
ieee802154_llsec_dump_table(struct sk_buff *skb, struct netlink_callback *cb,
			    int (*step)(struct llsec_dump_data*))
{
	struct net *net = sock_net(skb->sk);
	struct net_device *dev;
	struct llsec_dump_data data;
	int idx = 0;
	int first_dev = cb->args[0];
	int rc;

	for_each_netdev(net, dev) {
		if (idx < first_dev || dev->type != ARPHRD_IEEE802154)
			goto skip;

		data.ops = ieee802154_mlme_ops(dev);
		if (!data.ops->llsec)
			goto skip;

		data.skb = skb;
		data.s_idx = cb->args[1];
		data.s_idx2 = cb->args[2];
		data.dev = dev;
		data.portid = NETLINK_CB(cb->skb).portid;
		data.nlmsg_seq = cb->nlh->nlmsg_seq;

		data.ops->llsec->lock_table(dev);
		data.ops->llsec->get_table(data.dev, &data.table);
		rc = step(&data);
		data.ops->llsec->unlock_table(dev);

		/* message full: stop without counting this device so the
		 * next dump call resumes here
		 */
		if (rc < 0)
			break;

skip:
		idx++;
	}
	cb->args[0] = idx;

	return skb->len;
}
+
+static int
+ieee802154_nl_llsec_change(struct sk_buff *skb, struct genl_info *info,
+                          int (*fn)(struct net_device*, struct genl_info*))
+{
+       struct net_device *dev = NULL;
+       int rc = -EINVAL;
+
+       dev = ieee802154_nl_get_dev(info);
+       if (!dev)
+               return -ENODEV;
+
+       if (!ieee802154_mlme_ops(dev)->llsec)
+               rc = -EOPNOTSUPP;
+       else
+               rc = fn(dev, info);
+
+       dev_put(dev);
+       return rc;
+}
+
+
+
/* Parse llsec key material and usage policy from netlink attributes.
 *
 * Mandatory: the frame-type usage bitmap and the raw key bytes.  If
 * the bitmap allows MAC command frames, a 256-bit command bitmap is
 * mandatory too.
 *
 * Returns 0 on success, -EINVAL on missing or invalid attributes.
 */
static int
ieee802154_llsec_parse_key(struct genl_info *info,
			   struct ieee802154_llsec_key *key)
{
	u8 frames;
	u32 commands[256 / 32];

	memset(key, 0, sizeof(*key));

	if (!info->attrs[IEEE802154_ATTR_LLSEC_KEY_USAGE_FRAME_TYPES] ||
	    !info->attrs[IEEE802154_ATTR_LLSEC_KEY_BYTES])
		return -EINVAL;

	frames = nla_get_u8(info->attrs[IEEE802154_ATTR_LLSEC_KEY_USAGE_FRAME_TYPES]);
	if ((frames & BIT(IEEE802154_FC_TYPE_MAC_CMD)) &&
	    !info->attrs[IEEE802154_ATTR_LLSEC_KEY_USAGE_COMMANDS])
		return -EINVAL;

	if (info->attrs[IEEE802154_ATTR_LLSEC_KEY_USAGE_COMMANDS]) {
		nla_memcpy(commands,
			   info->attrs[IEEE802154_ATTR_LLSEC_KEY_USAGE_COMMANDS],
			   256 / 8);

		/* only the last 32-bit word may carry bits, and only up
		 * to IEEE802154_CMD_GTS_REQ.
		 * NOTE(review): command ids are read from commands[7] —
		 * confirm the expected bitmap word order with userspace.
		 */
		if (commands[0] || commands[1] || commands[2] || commands[3] ||
		    commands[4] || commands[5] || commands[6] ||
		    commands[7] >= BIT(IEEE802154_CMD_GTS_REQ + 1))
			return -EINVAL;

		key->cmd_frame_ids = commands[7];
	}

	key->frame_types = frames;

	nla_memcpy(key->key, info->attrs[IEEE802154_ATTR_LLSEC_KEY_BYTES],
		   IEEE802154_LLSEC_KEY_SIZE);

	return 0;
}
+
+static int llsec_add_key(struct net_device *dev, struct genl_info *info)
+{
+       struct ieee802154_mlme_ops *ops = ieee802154_mlme_ops(dev);
+       struct ieee802154_llsec_key key;
+       struct ieee802154_llsec_key_id id;
+
+       if (ieee802154_llsec_parse_key(info, &key) ||
+           ieee802154_llsec_parse_key_id(info, &id))
+               return -EINVAL;
+
+       return ops->llsec->add_key(dev, &id, &key);
+}
+
+int ieee802154_llsec_add_key(struct sk_buff *skb, struct genl_info *info)
+{
+       if ((info->nlhdr->nlmsg_flags & (NLM_F_CREATE | NLM_F_EXCL)) !=
+           (NLM_F_CREATE | NLM_F_EXCL))
+               return -EINVAL;
+
+       return ieee802154_nl_llsec_change(skb, info, llsec_add_key);
+}
+
+static int llsec_remove_key(struct net_device *dev, struct genl_info *info)
+{
+       struct ieee802154_mlme_ops *ops = ieee802154_mlme_ops(dev);
+       struct ieee802154_llsec_key_id id;
+
+       if (ieee802154_llsec_parse_key_id(info, &id))
+               return -EINVAL;
+
+       return ops->llsec->del_key(dev, &id);
+}
+
/* Netlink handler: delete an llsec key identified by the attributes
 * parsed in llsec_remove_key().
 */
int ieee802154_llsec_del_key(struct sk_buff *skb, struct genl_info *info)
{
	return ieee802154_nl_llsec_change(skb, info, llsec_remove_key);
}
+
/* Fill one IEEE802154_LLSEC_LIST_KEY multipart message for @key.
 * Returns 0 on success or -EMSGSIZE (with the partial message
 * cancelled) when @msg is full.
 */
static int
ieee802154_nl_fill_key(struct sk_buff *msg, u32 portid, u32 seq,
		       const struct ieee802154_llsec_key_entry *key,
		       const struct net_device *dev)
{
	void *hdr;
	u32 commands[256 / 32];

	hdr = genlmsg_put(msg, 0, seq, &nl802154_family, NLM_F_MULTI,
			  IEEE802154_LLSEC_LIST_KEY);
	if (!hdr)
		goto out;

	if (nla_put_string(msg, IEEE802154_ATTR_DEV_NAME, dev->name) ||
	    nla_put_u32(msg, IEEE802154_ATTR_DEV_INDEX, dev->ifindex) ||
	    ieee802154_llsec_fill_key_id(msg, &key->id) ||
	    nla_put_u8(msg, IEEE802154_ATTR_LLSEC_KEY_USAGE_FRAME_TYPES,
		       key->key->frame_types))
		goto nla_put_failure;

	/* command bitmap only accompanies keys valid for MAC commands;
	 * cmd_frame_ids occupies the last word, matching the parser
	 */
	if (key->key->frame_types & BIT(IEEE802154_FC_TYPE_MAC_CMD)) {
		memset(commands, 0, sizeof(commands));
		commands[7] = key->key->cmd_frame_ids;
		if (nla_put(msg, IEEE802154_ATTR_LLSEC_KEY_USAGE_COMMANDS,
			    sizeof(commands), commands))
			goto nla_put_failure;
	}

	if (nla_put(msg, IEEE802154_ATTR_LLSEC_KEY_BYTES,
		    IEEE802154_LLSEC_KEY_SIZE, key->key->key))
		goto nla_put_failure;

	genlmsg_end(msg, hdr);
	return 0;

nla_put_failure:
	genlmsg_cancel(msg, hdr);
out:
	return -EMSGSIZE;
}
+
/* Emit one dump message per key entry, resuming at s_idx; stops with
 * -EMSGSIZE when the dump skb is full.
 */
static int llsec_iter_keys(struct llsec_dump_data *data)
{
	struct ieee802154_llsec_key_entry *pos;
	int rc = 0, idx = 0;

	list_for_each_entry(pos, &data->table->keys, list) {
		if (idx++ < data->s_idx)
			continue;

		if (ieee802154_nl_fill_key(data->skb, data->portid,
					   data->nlmsg_seq, pos, data->dev)) {
			rc = -EMSGSIZE;
			break;
		}

		data->s_idx++;
	}

	return rc;
}
+
/* Netlink dump handler: list all llsec keys across devices. */
int ieee802154_llsec_dump_keys(struct sk_buff *skb, struct netlink_callback *cb)
{
	return ieee802154_llsec_dump_table(skb, cb, llsec_iter_keys);
}
+
+
+
/* Parse an llsec device descriptor from netlink attributes.
 *
 * Mandatory: frame counter, extended (hw) address, override flag and
 * key mode.  PAN id and short address must be given together or not
 * at all; without them the short address defaults to
 * IEEE802154_ADDR_UNDEF.
 */
static int
llsec_parse_dev(struct genl_info *info,
		struct ieee802154_llsec_device *dev)
{
	memset(dev, 0, sizeof(*dev));

	if (!info->attrs[IEEE802154_ATTR_LLSEC_FRAME_COUNTER] ||
	    !info->attrs[IEEE802154_ATTR_HW_ADDR] ||
	    !info->attrs[IEEE802154_ATTR_LLSEC_DEV_OVERRIDE] ||
	    !info->attrs[IEEE802154_ATTR_LLSEC_DEV_KEY_MODE] ||
	    (!!info->attrs[IEEE802154_ATTR_PAN_ID] !=
	     !!info->attrs[IEEE802154_ATTR_SHORT_ADDR]))
		return -EINVAL;

	if (info->attrs[IEEE802154_ATTR_PAN_ID]) {
		dev->pan_id = nla_get_shortaddr(info->attrs[IEEE802154_ATTR_PAN_ID]);
		dev->short_addr = nla_get_shortaddr(info->attrs[IEEE802154_ATTR_SHORT_ADDR]);
	} else {
		dev->short_addr = cpu_to_le16(IEEE802154_ADDR_UNDEF);
	}

	dev->hwaddr = nla_get_hwaddr(info->attrs[IEEE802154_ATTR_HW_ADDR]);
	dev->frame_counter = nla_get_u32(info->attrs[IEEE802154_ATTR_LLSEC_FRAME_COUNTER]);
	dev->seclevel_exempt = !!nla_get_u8(info->attrs[IEEE802154_ATTR_LLSEC_DEV_OVERRIDE]);
	dev->key_mode = nla_get_u8(info->attrs[IEEE802154_ATTR_LLSEC_DEV_KEY_MODE]);

	/* key mode comes from userspace; reject out-of-range values */
	if (dev->key_mode >= __IEEE802154_LLSEC_DEVKEY_MAX)
		return -EINVAL;

	return 0;
}
+
+static int llsec_add_dev(struct net_device *dev, struct genl_info *info)
+{
+       struct ieee802154_mlme_ops *ops = ieee802154_mlme_ops(dev);
+       struct ieee802154_llsec_device desc;
+
+       if (llsec_parse_dev(info, &desc))
+               return -EINVAL;
+
+       return ops->llsec->add_dev(dev, &desc);
+}
+
+int ieee802154_llsec_add_dev(struct sk_buff *skb, struct genl_info *info)
+{
+       if ((info->nlhdr->nlmsg_flags & (NLM_F_CREATE | NLM_F_EXCL)) !=
+           (NLM_F_CREATE | NLM_F_EXCL))
+               return -EINVAL;
+
+       return ieee802154_nl_llsec_change(skb, info, llsec_add_dev);
+}
+
+static int llsec_del_dev(struct net_device *dev, struct genl_info *info)
+{
+       struct ieee802154_mlme_ops *ops = ieee802154_mlme_ops(dev);
+       __le64 devaddr;
+
+       if (!info->attrs[IEEE802154_ATTR_HW_ADDR])
+               return -EINVAL;
+
+       devaddr = nla_get_hwaddr(info->attrs[IEEE802154_ATTR_HW_ADDR]);
+
+       return ops->llsec->del_dev(dev, devaddr);
+}
+
/* Netlink handler: delete an llsec device entry by hw address. */
int ieee802154_llsec_del_dev(struct sk_buff *skb, struct genl_info *info)
{
	return ieee802154_nl_llsec_change(skb, info, llsec_del_dev);
}
+
/* Fill one IEEE802154_LLSEC_LIST_DEV multipart message for @desc.
 * Returns 0 on success or -EMSGSIZE (with the partial message
 * cancelled) when @msg is full.
 */
static int
ieee802154_nl_fill_dev(struct sk_buff *msg, u32 portid, u32 seq,
		       const struct ieee802154_llsec_device *desc,
		       const struct net_device *dev)
{
	void *hdr;

	hdr = genlmsg_put(msg, 0, seq, &nl802154_family, NLM_F_MULTI,
			  IEEE802154_LLSEC_LIST_DEV);
	if (!hdr)
		goto out;

	if (nla_put_string(msg, IEEE802154_ATTR_DEV_NAME, dev->name) ||
	    nla_put_u32(msg, IEEE802154_ATTR_DEV_INDEX, dev->ifindex) ||
	    nla_put_shortaddr(msg, IEEE802154_ATTR_PAN_ID, desc->pan_id) ||
	    nla_put_shortaddr(msg, IEEE802154_ATTR_SHORT_ADDR,
			      desc->short_addr) ||
	    nla_put_hwaddr(msg, IEEE802154_ATTR_HW_ADDR, desc->hwaddr) ||
	    nla_put_u32(msg, IEEE802154_ATTR_LLSEC_FRAME_COUNTER,
			desc->frame_counter) ||
	    nla_put_u8(msg, IEEE802154_ATTR_LLSEC_DEV_OVERRIDE,
		       desc->seclevel_exempt) ||
	    nla_put_u8(msg, IEEE802154_ATTR_LLSEC_DEV_KEY_MODE, desc->key_mode))
		goto nla_put_failure;

	genlmsg_end(msg, hdr);
	return 0;

nla_put_failure:
	genlmsg_cancel(msg, hdr);
out:
	return -EMSGSIZE;
}
+
+static int llsec_iter_devs(struct llsec_dump_data *data)
+{
+       struct ieee802154_llsec_device *pos;
+       int rc = 0, idx = 0;
+
+       list_for_each_entry(pos, &data->table->devices, list) {
+               if (idx++ < data->s_idx)
+                       continue;
+
+               if (ieee802154_nl_fill_dev(data->skb, data->portid,
+                                          data->nlmsg_seq, pos, data->dev)) {
+                       rc = -EMSGSIZE;
+                       break;
+               }
+
+               data->s_idx++;
+       }
+
+       return rc;
+}
+
/* Netlink dump handler: list all llsec device entries across devices. */
int ieee802154_llsec_dump_devs(struct sk_buff *skb, struct netlink_callback *cb)
{
	return ieee802154_llsec_dump_table(skb, cb, llsec_iter_devs);
}
+
+
+
/* Add a device-specific key: requires the device's extended address,
 * a frame counter and a parsable key id.
 */
static int llsec_add_devkey(struct net_device *dev, struct genl_info *info)
{
	struct ieee802154_mlme_ops *ops = ieee802154_mlme_ops(dev);
	struct ieee802154_llsec_device_key key;
	__le64 devaddr;

	if (!info->attrs[IEEE802154_ATTR_LLSEC_FRAME_COUNTER] ||
	    !info->attrs[IEEE802154_ATTR_HW_ADDR] ||
	    ieee802154_llsec_parse_key_id(info, &key.key_id))
		return -EINVAL;

	devaddr = nla_get_hwaddr(info->attrs[IEEE802154_ATTR_HW_ADDR]);
	key.frame_counter = nla_get_u32(info->attrs[IEEE802154_ATTR_LLSEC_FRAME_COUNTER]);

	return ops->llsec->add_devkey(dev, devaddr, &key);
}
+
+int ieee802154_llsec_add_devkey(struct sk_buff *skb, struct genl_info *info)
+{
+       if ((info->nlhdr->nlmsg_flags & (NLM_F_CREATE | NLM_F_EXCL)) !=
+           (NLM_F_CREATE | NLM_F_EXCL))
+               return -EINVAL;
+
+       return ieee802154_nl_llsec_change(skb, info, llsec_add_devkey);
+}
+
+static int llsec_del_devkey(struct net_device *dev, struct genl_info *info)
+{
+       struct ieee802154_mlme_ops *ops = ieee802154_mlme_ops(dev);
+       struct ieee802154_llsec_device_key key;
+       __le64 devaddr;
+
+       if (!info->attrs[IEEE802154_ATTR_HW_ADDR] ||
+           ieee802154_llsec_parse_key_id(info, &key.key_id))
+               return -EINVAL;
+
+       devaddr = nla_get_hwaddr(info->attrs[IEEE802154_ATTR_HW_ADDR]);
+
+       return ops->llsec->del_devkey(dev, devaddr, &key);
+}
+
/* Netlink handler: delete a device key by device address and key id. */
int ieee802154_llsec_del_devkey(struct sk_buff *skb, struct genl_info *info)
{
	return ieee802154_nl_llsec_change(skb, info, llsec_del_devkey);
}
+
/* Fill one IEEE802154_LLSEC_LIST_DEVKEY multipart message for the
 * (@devaddr, @devkey) pair.  Returns 0 on success or -EMSGSIZE (with
 * the partial message cancelled) when @msg is full.
 */
static int
ieee802154_nl_fill_devkey(struct sk_buff *msg, u32 portid, u32 seq,
			  __le64 devaddr,
			  const struct ieee802154_llsec_device_key *devkey,
			  const struct net_device *dev)
{
	void *hdr;

	hdr = genlmsg_put(msg, 0, seq, &nl802154_family, NLM_F_MULTI,
			  IEEE802154_LLSEC_LIST_DEVKEY);
	if (!hdr)
		goto out;

	if (nla_put_string(msg, IEEE802154_ATTR_DEV_NAME, dev->name) ||
	    nla_put_u32(msg, IEEE802154_ATTR_DEV_INDEX, dev->ifindex) ||
	    nla_put_hwaddr(msg, IEEE802154_ATTR_HW_ADDR, devaddr) ||
	    nla_put_u32(msg, IEEE802154_ATTR_LLSEC_FRAME_COUNTER,
			devkey->frame_counter) ||
	    ieee802154_llsec_fill_key_id(msg, &devkey->key_id))
		goto nla_put_failure;

	genlmsg_end(msg, hdr);
	return 0;

nla_put_failure:
	genlmsg_cancel(msg, hdr);
out:
	return -EMSGSIZE;
}
+
+static int llsec_iter_devkeys(struct llsec_dump_data *data)
+{
+       struct ieee802154_llsec_device *dpos;
+       struct ieee802154_llsec_device_key *kpos;
+       int rc = 0, idx = 0, idx2;
+
+       list_for_each_entry(dpos, &data->table->devices, list) {
+               if (idx++ < data->s_idx)
+                       continue;
+
+               idx2 = 0;
+
+               list_for_each_entry(kpos, &dpos->keys, list) {
+                       if (idx2++ < data->s_idx2)
+                               continue;
+
+                       if (ieee802154_nl_fill_devkey(data->skb, data->portid,
+                                                     data->nlmsg_seq,
+                                                     dpos->hwaddr, kpos,
+                                                     data->dev)) {
+                               return rc = -EMSGSIZE;
+                       }
+
+                       data->s_idx2++;
+               }
+
+               data->s_idx++;
+       }
+
+       return rc;
+}
+
/* Netlink dump handler: list all device keys across devices. */
int ieee802154_llsec_dump_devkeys(struct sk_buff *skb,
				  struct netlink_callback *cb)
{
	return ieee802154_llsec_dump_table(skb, cb, llsec_iter_devkeys);
}
+
+
+
/* Parse a security-level table entry from netlink attributes.
 *
 * Mandatory: frame type, allowed-levels bitmap and device-override
 * flag.  MAC command frames additionally require the command frame
 * id.
 */
static int
llsec_parse_seclevel(struct genl_info *info,
		     struct ieee802154_llsec_seclevel *sl)
{
	memset(sl, 0, sizeof(*sl));

	if (!info->attrs[IEEE802154_ATTR_LLSEC_FRAME_TYPE] ||
	    !info->attrs[IEEE802154_ATTR_LLSEC_SECLEVELS] ||
	    !info->attrs[IEEE802154_ATTR_LLSEC_DEV_OVERRIDE])
		return -EINVAL;

	sl->frame_type = nla_get_u8(info->attrs[IEEE802154_ATTR_LLSEC_FRAME_TYPE]);
	if (sl->frame_type == IEEE802154_FC_TYPE_MAC_CMD) {
		if (!info->attrs[IEEE802154_ATTR_LLSEC_CMD_FRAME_ID])
			return -EINVAL;

		sl->cmd_frame_id = nla_get_u8(info->attrs[IEEE802154_ATTR_LLSEC_CMD_FRAME_ID]);
	}

	sl->sec_levels = nla_get_u8(info->attrs[IEEE802154_ATTR_LLSEC_SECLEVELS]);
	sl->device_override = nla_get_u8(info->attrs[IEEE802154_ATTR_LLSEC_DEV_OVERRIDE]);

	return 0;
}
+
+static int llsec_add_seclevel(struct net_device *dev, struct genl_info *info)
+{
+       struct ieee802154_mlme_ops *ops = ieee802154_mlme_ops(dev);
+       struct ieee802154_llsec_seclevel sl;
+
+       if (llsec_parse_seclevel(info, &sl))
+               return -EINVAL;
+
+       return ops->llsec->add_seclevel(dev, &sl);
+}
+
+int ieee802154_llsec_add_seclevel(struct sk_buff *skb, struct genl_info *info)
+{
+       if ((info->nlhdr->nlmsg_flags & (NLM_F_CREATE | NLM_F_EXCL)) !=
+           (NLM_F_CREATE | NLM_F_EXCL))
+               return -EINVAL;
+
+       return ieee802154_nl_llsec_change(skb, info, llsec_add_seclevel);
+}
+
+static int llsec_del_seclevel(struct net_device *dev, struct genl_info *info)
+{
+       struct ieee802154_mlme_ops *ops = ieee802154_mlme_ops(dev);
+       struct ieee802154_llsec_seclevel sl;
+
+       if (llsec_parse_seclevel(info, &sl))
+               return -EINVAL;
+
+       return ops->llsec->del_seclevel(dev, &sl);
+}
+
/* Netlink handler: delete a security-level entry. */
int ieee802154_llsec_del_seclevel(struct sk_buff *skb, struct genl_info *info)
{
	return ieee802154_nl_llsec_change(skb, info, llsec_del_seclevel);
}
+
/* Fill one IEEE802154_LLSEC_LIST_SECLEVEL multipart message for @sl.
 * Returns 0 on success or -EMSGSIZE (with the partial message
 * cancelled) when @msg is full.
 */
static int
ieee802154_nl_fill_seclevel(struct sk_buff *msg, u32 portid, u32 seq,
			    const struct ieee802154_llsec_seclevel *sl,
			    const struct net_device *dev)
{
	void *hdr;

	hdr = genlmsg_put(msg, 0, seq, &nl802154_family, NLM_F_MULTI,
			  IEEE802154_LLSEC_LIST_SECLEVEL);
	if (!hdr)
		goto out;

	if (nla_put_string(msg, IEEE802154_ATTR_DEV_NAME, dev->name) ||
	    nla_put_u32(msg, IEEE802154_ATTR_DEV_INDEX, dev->ifindex) ||
	    nla_put_u8(msg, IEEE802154_ATTR_LLSEC_FRAME_TYPE, sl->frame_type) ||
	    nla_put_u8(msg, IEEE802154_ATTR_LLSEC_SECLEVELS, sl->sec_levels) ||
	    nla_put_u8(msg, IEEE802154_ATTR_LLSEC_DEV_OVERRIDE,
		       sl->device_override))
		goto nla_put_failure;

	/* command frame id is only meaningful for MAC command frames */
	if (sl->frame_type == IEEE802154_FC_TYPE_MAC_CMD &&
	    nla_put_u8(msg, IEEE802154_ATTR_LLSEC_CMD_FRAME_ID,
		       sl->cmd_frame_id))
		goto nla_put_failure;

	genlmsg_end(msg, hdr);
	return 0;

nla_put_failure:
	genlmsg_cancel(msg, hdr);
out:
	return -EMSGSIZE;
}
+
+static int llsec_iter_seclevels(struct llsec_dump_data *data)
+{
+       struct ieee802154_llsec_seclevel *pos;
+       int rc = 0, idx = 0;
+
+       list_for_each_entry(pos, &data->table->security_levels, list) {
+               if (idx++ < data->s_idx)
+                       continue;
+
+               if (ieee802154_nl_fill_seclevel(data->skb, data->portid,
+                                               data->nlmsg_seq, pos,
+                                               data->dev)) {
+                       rc = -EMSGSIZE;
+                       break;
+               }
+
+               data->s_idx++;
+       }
+
+       return rc;
+}
+
/* Netlink dump handler: list all security-level entries across devices. */
int ieee802154_llsec_dump_seclevels(struct sk_buff *skb,
				    struct netlink_callback *cb)
{
	return ieee802154_llsec_dump_table(skb, cb, llsec_iter_seclevels);
}
index fd7be5e45cefb99d37df9d173c9bf7c0ab1d7dc3..3a703ab88348fc38481ec583ff5d0885ae8c9655 100644 (file)
@@ -62,5 +62,21 @@ const struct nla_policy ieee802154_policy[IEEE802154_ATTR_MAX + 1] = {
        [IEEE802154_ATTR_CSMA_MAX_BE] = { .type = NLA_U8, },
 
        [IEEE802154_ATTR_FRAME_RETRIES] = { .type = NLA_S8, },
+
+       [IEEE802154_ATTR_LLSEC_ENABLED] = { .type = NLA_U8, },
+       [IEEE802154_ATTR_LLSEC_SECLEVEL] = { .type = NLA_U8, },
+       [IEEE802154_ATTR_LLSEC_KEY_MODE] = { .type = NLA_U8, },
+       [IEEE802154_ATTR_LLSEC_KEY_SOURCE_SHORT] = { .type = NLA_U32, },
+       [IEEE802154_ATTR_LLSEC_KEY_SOURCE_EXTENDED] = { .type = NLA_HW_ADDR, },
+       [IEEE802154_ATTR_LLSEC_KEY_ID] = { .type = NLA_U8, },
+       [IEEE802154_ATTR_LLSEC_FRAME_COUNTER] = { .type = NLA_U32 },
+       [IEEE802154_ATTR_LLSEC_KEY_BYTES] = { .len = 16, },
+       [IEEE802154_ATTR_LLSEC_KEY_USAGE_FRAME_TYPES] = { .type = NLA_U8, },
+       [IEEE802154_ATTR_LLSEC_KEY_USAGE_COMMANDS] = { .len = 258 / 8 },
+       [IEEE802154_ATTR_LLSEC_FRAME_TYPE] = { .type = NLA_U8, },
+       [IEEE802154_ATTR_LLSEC_CMD_FRAME_ID] = { .type = NLA_U8, },
+       [IEEE802154_ATTR_LLSEC_SECLEVELS] = { .type = NLA_U8, },
+       [IEEE802154_ATTR_LLSEC_DEV_OVERRIDE] = { .type = NLA_U8, },
+       [IEEE802154_ATTR_LLSEC_DEV_KEY_MODE] = { .type = NLA_U8, },
 };
 
index 8c54870db792cab059bff464d29776510ec3e5ec..279132bcadd92621a3e23e382fe5f03d48dac313 100644 (file)
@@ -1476,22 +1476,20 @@ int inet_ctl_sock_create(struct sock **sk, unsigned short family,
 }
 EXPORT_SYMBOL_GPL(inet_ctl_sock_create);
 
-unsigned long snmp_fold_field(void __percpu *mib[], int offt)
+unsigned long snmp_fold_field(void __percpu *mib, int offt)
 {
        unsigned long res = 0;
-       int i, j;
+       int i;
 
-       for_each_possible_cpu(i) {
-               for (j = 0; j < SNMP_ARRAY_SZ; j++)
-                       res += *(((unsigned long *) per_cpu_ptr(mib[j], i)) + offt);
-       }
+       for_each_possible_cpu(i)
+               res += *(((unsigned long *) per_cpu_ptr(mib, i)) + offt);
        return res;
 }
 EXPORT_SYMBOL_GPL(snmp_fold_field);
 
 #if BITS_PER_LONG==32
 
-u64 snmp_fold_field64(void __percpu *mib[], int offt, size_t syncp_offset)
+u64 snmp_fold_field64(void __percpu *mib, int offt, size_t syncp_offset)
 {
        u64 res = 0;
        int cpu;
@@ -1502,7 +1500,7 @@ u64 snmp_fold_field64(void __percpu *mib[], int offt, size_t syncp_offset)
                u64 v;
                unsigned int start;
 
-               bhptr = per_cpu_ptr(mib[0], cpu);
+               bhptr = per_cpu_ptr(mib, cpu);
                syncp = (struct u64_stats_sync *)(bhptr + syncp_offset);
                do {
                        start = u64_stats_fetch_begin_irq(syncp);
@@ -1516,25 +1514,6 @@ u64 snmp_fold_field64(void __percpu *mib[], int offt, size_t syncp_offset)
 EXPORT_SYMBOL_GPL(snmp_fold_field64);
 #endif
 
-int snmp_mib_init(void __percpu *ptr[2], size_t mibsize, size_t align)
-{
-       BUG_ON(ptr == NULL);
-       ptr[0] = __alloc_percpu(mibsize, align);
-       if (!ptr[0])
-               return -ENOMEM;
-
-#if SNMP_ARRAY_SZ == 2
-       ptr[1] = __alloc_percpu(mibsize, align);
-       if (!ptr[1]) {
-               free_percpu(ptr[0]);
-               ptr[0] = NULL;
-               return -ENOMEM;
-       }
-#endif
-       return 0;
-}
-EXPORT_SYMBOL_GPL(snmp_mib_init);
-
 #ifdef CONFIG_IP_MULTICAST
 static const struct net_protocol igmp_protocol = {
        .handler =      igmp_rcv,
@@ -1570,40 +1549,30 @@ static __net_init int ipv4_mib_init_net(struct net *net)
 {
        int i;
 
-       if (snmp_mib_init((void __percpu **)net->mib.tcp_statistics,
-                         sizeof(struct tcp_mib),
-                         __alignof__(struct tcp_mib)) < 0)
+       net->mib.tcp_statistics = alloc_percpu(struct tcp_mib);
+       if (!net->mib.tcp_statistics)
                goto err_tcp_mib;
-       if (snmp_mib_init((void __percpu **)net->mib.ip_statistics,
-                         sizeof(struct ipstats_mib),
-                         __alignof__(struct ipstats_mib)) < 0)
+       net->mib.ip_statistics = alloc_percpu(struct ipstats_mib);
+       if (!net->mib.ip_statistics)
                goto err_ip_mib;
 
        for_each_possible_cpu(i) {
                struct ipstats_mib *af_inet_stats;
-               af_inet_stats = per_cpu_ptr(net->mib.ip_statistics[0], i);
-               u64_stats_init(&af_inet_stats->syncp);
-#if SNMP_ARRAY_SZ == 2
-               af_inet_stats = per_cpu_ptr(net->mib.ip_statistics[1], i);
+               af_inet_stats = per_cpu_ptr(net->mib.ip_statistics, i);
                u64_stats_init(&af_inet_stats->syncp);
-#endif
        }
 
-       if (snmp_mib_init((void __percpu **)net->mib.net_statistics,
-                         sizeof(struct linux_mib),
-                         __alignof__(struct linux_mib)) < 0)
+       net->mib.net_statistics = alloc_percpu(struct linux_mib);
+       if (!net->mib.net_statistics)
                goto err_net_mib;
-       if (snmp_mib_init((void __percpu **)net->mib.udp_statistics,
-                         sizeof(struct udp_mib),
-                         __alignof__(struct udp_mib)) < 0)
+       net->mib.udp_statistics = alloc_percpu(struct udp_mib);
+       if (!net->mib.udp_statistics)
                goto err_udp_mib;
-       if (snmp_mib_init((void __percpu **)net->mib.udplite_statistics,
-                         sizeof(struct udp_mib),
-                         __alignof__(struct udp_mib)) < 0)
+       net->mib.udplite_statistics = alloc_percpu(struct udp_mib);
+       if (!net->mib.udplite_statistics)
                goto err_udplite_mib;
-       if (snmp_mib_init((void __percpu **)net->mib.icmp_statistics,
-                         sizeof(struct icmp_mib),
-                         __alignof__(struct icmp_mib)) < 0)
+       net->mib.icmp_statistics = alloc_percpu(struct icmp_mib);
+       if (!net->mib.icmp_statistics)
                goto err_icmp_mib;
        net->mib.icmpmsg_statistics = kzalloc(sizeof(struct icmpmsg_mib),
                                              GFP_KERNEL);
@@ -1614,17 +1583,17 @@ static __net_init int ipv4_mib_init_net(struct net *net)
        return 0;
 
 err_icmpmsg_mib:
-       snmp_mib_free((void __percpu **)net->mib.icmp_statistics);
+       free_percpu(net->mib.icmp_statistics);
 err_icmp_mib:
-       snmp_mib_free((void __percpu **)net->mib.udplite_statistics);
+       free_percpu(net->mib.udplite_statistics);
 err_udplite_mib:
-       snmp_mib_free((void __percpu **)net->mib.udp_statistics);
+       free_percpu(net->mib.udp_statistics);
 err_udp_mib:
-       snmp_mib_free((void __percpu **)net->mib.net_statistics);
+       free_percpu(net->mib.net_statistics);
 err_net_mib:
-       snmp_mib_free((void __percpu **)net->mib.ip_statistics);
+       free_percpu(net->mib.ip_statistics);
 err_ip_mib:
-       snmp_mib_free((void __percpu **)net->mib.tcp_statistics);
+       free_percpu(net->mib.tcp_statistics);
 err_tcp_mib:
        return -ENOMEM;
 }
@@ -1632,12 +1601,12 @@ err_tcp_mib:
 static __net_exit void ipv4_mib_exit_net(struct net *net)
 {
        kfree(net->mib.icmpmsg_statistics);
-       snmp_mib_free((void __percpu **)net->mib.icmp_statistics);
-       snmp_mib_free((void __percpu **)net->mib.udplite_statistics);
-       snmp_mib_free((void __percpu **)net->mib.udp_statistics);
-       snmp_mib_free((void __percpu **)net->mib.net_statistics);
-       snmp_mib_free((void __percpu **)net->mib.ip_statistics);
-       snmp_mib_free((void __percpu **)net->mib.tcp_statistics);
+       free_percpu(net->mib.icmp_statistics);
+       free_percpu(net->mib.udplite_statistics);
+       free_percpu(net->mib.udp_statistics);
+       free_percpu(net->mib.net_statistics);
+       free_percpu(net->mib.ip_statistics);
+       free_percpu(net->mib.tcp_statistics);
 }
 
 static __net_initdata struct pernet_operations ipv4_mib_ops = {
@@ -1650,6 +1619,39 @@ static int __init init_ipv4_mibs(void)
        return register_pernet_subsys(&ipv4_mib_ops);
 }
 
+static __net_init int inet_init_net(struct net *net)
+{
+       /*
+        * Set defaults for local port range
+        */
+       seqlock_init(&net->ipv4.ip_local_ports.lock);
+       net->ipv4.ip_local_ports.range[0] =  32768;
+       net->ipv4.ip_local_ports.range[1] =  61000;
+
+       seqlock_init(&net->ipv4.ping_group_range.lock);
+       /*
+        * Sane defaults - nobody may create ping sockets.
+        * Boot scripts should set this to distro-specific group.
+        */
+       net->ipv4.ping_group_range.range[0] = make_kgid(&init_user_ns, 1);
+       net->ipv4.ping_group_range.range[1] = make_kgid(&init_user_ns, 0);
+       return 0;
+}
+
+static __net_exit void inet_exit_net(struct net *net)
+{
+}
+
+static __net_initdata struct pernet_operations af_inet_ops = {
+       .init = inet_init_net,
+       .exit = inet_exit_net,
+};
+
+static int __init init_inet_pernet_ops(void)
+{
+       return register_pernet_subsys(&af_inet_ops);
+}
+
 static int ipv4_proc_init(void);
 
 /*
@@ -1703,13 +1705,9 @@ static int __init inet_init(void)
 
        BUILD_BUG_ON(sizeof(struct inet_skb_parm) > FIELD_SIZEOF(struct sk_buff, cb));
 
-       sysctl_local_reserved_ports = kzalloc(65536 / 8, GFP_KERNEL);
-       if (!sysctl_local_reserved_ports)
-               goto out;
-
        rc = proto_register(&tcp_prot, 1);
        if (rc)
-               goto out_free_reserved_ports;
+               goto out;
 
        rc = proto_register(&udp_prot, 1);
        if (rc)
@@ -1794,6 +1792,9 @@ static int __init inet_init(void)
        if (ip_mr_init())
                pr_crit("%s: Cannot init ipv4 mroute\n", __func__);
 #endif
+
+       if (init_inet_pernet_ops())
+               pr_crit("%s: Cannot init ipv4 inet pernet ops\n", __func__);
        /*
         *      Initialise per-cpu ipv4 mibs
         */
@@ -1816,8 +1817,6 @@ out_unregister_udp_proto:
        proto_unregister(&udp_prot);
 out_unregister_tcp_proto:
        proto_unregister(&tcp_prot);
-out_free_reserved_ports:
-       kfree(sysctl_local_reserved_ports);
        goto out;
 }
 
index bdbf68bb2e2d194fcdf94553bd41a4ac9f184d7c..e9449376b58e4293b7735362912e1ea1191a8815 100644 (file)
@@ -106,7 +106,6 @@ static const struct nla_policy ifa_ipv4_policy[IFA_MAX+1] = {
 #define IN4_ADDR_HSIZE         (1U << IN4_ADDR_HSIZE_SHIFT)
 
 static struct hlist_head inet_addr_lst[IN4_ADDR_HSIZE];
-static DEFINE_SPINLOCK(inet_addr_hash_lock);
 
 static u32 inet_addr_hash(struct net *net, __be32 addr)
 {
@@ -119,16 +118,14 @@ static void inet_hash_insert(struct net *net, struct in_ifaddr *ifa)
 {
        u32 hash = inet_addr_hash(net, ifa->ifa_local);
 
-       spin_lock(&inet_addr_hash_lock);
+       ASSERT_RTNL();
        hlist_add_head_rcu(&ifa->hash, &inet_addr_lst[hash]);
-       spin_unlock(&inet_addr_hash_lock);
 }
 
 static void inet_hash_remove(struct in_ifaddr *ifa)
 {
-       spin_lock(&inet_addr_hash_lock);
+       ASSERT_RTNL();
        hlist_del_init_rcu(&ifa->hash);
-       spin_unlock(&inet_addr_hash_lock);
 }
 
 /**
@@ -830,7 +827,7 @@ static int inet_rtm_newaddr(struct sk_buff *skb, struct nlmsghdr *nlh)
        ifa_existing = find_matching_ifa(ifa);
        if (!ifa_existing) {
                /* It would be best to check for !NLM_F_CREATE here but
-                * userspace alreay relies on not having to provide this.
+                * userspace already relies on not having to provide this.
                 */
                set_ifa_lifetime(ifa, valid_lft, prefered_lft);
                return __inet_insert_ifa(ifa, nlh, NETLINK_CB(skb).portid);
index 8a043f03c88ecbb418b5466953abefd54c50b1d0..b10cd43a4722730205272d7822d699bc49ea71d3 100644 (file)
@@ -821,13 +821,13 @@ struct fib_info *fib_create_info(struct fib_config *cfg)
        fi = kzalloc(sizeof(*fi)+nhs*sizeof(struct fib_nh), GFP_KERNEL);
        if (fi == NULL)
                goto failure;
+       fib_info_cnt++;
        if (cfg->fc_mx) {
                fi->fib_metrics = kzalloc(sizeof(u32) * RTAX_MAX, GFP_KERNEL);
                if (!fi->fib_metrics)
                        goto failure;
        } else
                fi->fib_metrics = (u32 *) dst_default_metrics;
-       fib_info_cnt++;
 
        fi->fib_net = hold_net(net);
        fi->fib_protocol = cfg->fc_protocol;
index 250be7421ab36c50ce00a25dcd3c659ca1c97f18..fbfd829f4049a36d1d3d590ec5d913785740fb24 100644 (file)
@@ -93,28 +93,6 @@ void gre_build_header(struct sk_buff *skb, const struct tnl_ptk_info *tpi,
 }
 EXPORT_SYMBOL_GPL(gre_build_header);
 
-static __sum16 check_checksum(struct sk_buff *skb)
-{
-       __sum16 csum = 0;
-
-       switch (skb->ip_summed) {
-       case CHECKSUM_COMPLETE:
-               csum = csum_fold(skb->csum);
-
-               if (!csum)
-                       break;
-               /* Fall through. */
-
-       case CHECKSUM_NONE:
-               skb->csum = 0;
-               csum = __skb_checksum_complete(skb);
-               skb->ip_summed = CHECKSUM_COMPLETE;
-               break;
-       }
-
-       return csum;
-}
-
 static int parse_gre_header(struct sk_buff *skb, struct tnl_ptk_info *tpi,
                            bool *csum_err)
 {
@@ -141,7 +119,7 @@ static int parse_gre_header(struct sk_buff *skb, struct tnl_ptk_info *tpi,
 
        options = (__be32 *)(greh + 1);
        if (greh->flags & GRE_CSUM) {
-               if (check_checksum(skb)) {
+               if (skb_checksum_simple_validate(skb)) {
                        *csum_err = true;
                        return -EINVAL;
                }
index 0134663fdbce86f6da39d8f6c9d27ce6c404687c..79c3d947a48128a8a58b58776e99f3a6602d868c 100644 (file)
@@ -337,6 +337,7 @@ static void icmp_reply(struct icmp_bxm *icmp_param, struct sk_buff *skb)
        struct sock *sk;
        struct inet_sock *inet;
        __be32 daddr, saddr;
+       u32 mark = IP4_REPLY_MARK(net, skb->mark);
 
        if (ip_options_echo(&icmp_param->replyopts.opt.opt, skb))
                return;
@@ -349,6 +350,7 @@ static void icmp_reply(struct icmp_bxm *icmp_param, struct sk_buff *skb)
        icmp_param->data.icmph.checksum = 0;
 
        inet->tos = ip_hdr(skb)->tos;
+       sk->sk_mark = mark;
        daddr = ipc.addr = ip_hdr(skb)->saddr;
        saddr = fib_compute_spec_dst(skb);
        ipc.opt = NULL;
@@ -364,6 +366,7 @@ static void icmp_reply(struct icmp_bxm *icmp_param, struct sk_buff *skb)
        memset(&fl4, 0, sizeof(fl4));
        fl4.daddr = daddr;
        fl4.saddr = saddr;
+       fl4.flowi4_mark = mark;
        fl4.flowi4_tos = RT_TOS(ip_hdr(skb)->tos);
        fl4.flowi4_proto = IPPROTO_ICMP;
        security_skb_classify_flow(skb, flowi4_to_flowi(&fl4));
@@ -382,7 +385,7 @@ static struct rtable *icmp_route_lookup(struct net *net,
                                        struct flowi4 *fl4,
                                        struct sk_buff *skb_in,
                                        const struct iphdr *iph,
-                                       __be32 saddr, u8 tos,
+                                       __be32 saddr, u8 tos, u32 mark,
                                        int type, int code,
                                        struct icmp_bxm *param)
 {
@@ -394,6 +397,7 @@ static struct rtable *icmp_route_lookup(struct net *net,
        fl4->daddr = (param->replyopts.opt.opt.srr ?
                      param->replyopts.opt.opt.faddr : iph->saddr);
        fl4->saddr = saddr;
+       fl4->flowi4_mark = mark;
        fl4->flowi4_tos = RT_TOS(tos);
        fl4->flowi4_proto = IPPROTO_ICMP;
        fl4->fl4_icmp_type = type;
@@ -491,6 +495,7 @@ void icmp_send(struct sk_buff *skb_in, int type, int code, __be32 info)
        struct flowi4 fl4;
        __be32 saddr;
        u8  tos;
+       u32 mark;
        struct net *net;
        struct sock *sk;
 
@@ -592,6 +597,7 @@ void icmp_send(struct sk_buff *skb_in, int type, int code, __be32 info)
        tos = icmp_pointers[type].error ? ((iph->tos & IPTOS_TOS_MASK) |
                                           IPTOS_PREC_INTERNETCONTROL) :
                                          iph->tos;
+       mark = IP4_REPLY_MARK(net, skb_in->mark);
 
        if (ip_options_echo(&icmp_param->replyopts.opt.opt, skb_in))
                goto out_unlock;
@@ -608,13 +614,14 @@ void icmp_send(struct sk_buff *skb_in, int type, int code, __be32 info)
        icmp_param->skb   = skb_in;
        icmp_param->offset = skb_network_offset(skb_in);
        inet_sk(sk)->tos = tos;
+       sk->sk_mark = mark;
        ipc.addr = iph->saddr;
        ipc.opt = &icmp_param->replyopts.opt;
        ipc.tx_flags = 0;
        ipc.ttl = 0;
        ipc.tos = -1;
 
-       rt = icmp_route_lookup(net, &fl4, skb_in, iph, saddr, tos,
+       rt = icmp_route_lookup(net, &fl4, skb_in, iph, saddr, tos, mark,
                               type, code, icmp_param);
        if (IS_ERR(rt))
                goto out_unlock;
@@ -908,16 +915,8 @@ int icmp_rcv(struct sk_buff *skb)
 
        ICMP_INC_STATS_BH(net, ICMP_MIB_INMSGS);
 
-       switch (skb->ip_summed) {
-       case CHECKSUM_COMPLETE:
-               if (!csum_fold(skb->csum))
-                       break;
-               /* fall through */
-       case CHECKSUM_NONE:
-               skb->csum = 0;
-               if (__skb_checksum_complete(skb))
-                       goto csum_error;
-       }
+       if (skb_checksum_simple_validate(skb))
+               goto csum_error;
 
        if (!pskb_pull(skb, sizeof(*icmph)))
                goto error;
index 97e4d1655d26bb65121c1a8954af2d7565c288a6..17d34e3c2ac34f326cf68d74e4c7193f6d86d8e6 100644 (file)
@@ -988,16 +988,8 @@ int igmp_rcv(struct sk_buff *skb)
        if (!pskb_may_pull(skb, sizeof(struct igmphdr)))
                goto drop;
 
-       switch (skb->ip_summed) {
-       case CHECKSUM_COMPLETE:
-               if (!csum_fold(skb->csum))
-                       break;
-               /* fall through */
-       case CHECKSUM_NONE:
-               skb->csum = 0;
-               if (__skb_checksum_complete(skb))
-                       goto drop;
-       }
+       if (skb_checksum_simple_validate(skb))
+               goto drop;
 
        ih = igmp_hdr(skb);
        switch (ih->type) {
index 0d1e2cb877ec43692c5a7b4fe57e16cf921a8c97..14d02ea905b6bea37240f88054f0cd42db73c4c2 100644 (file)
@@ -29,19 +29,16 @@ const char inet_csk_timer_bug_msg[] = "inet_csk BUG: unknown timer value\n";
 EXPORT_SYMBOL(inet_csk_timer_bug_msg);
 #endif
 
-unsigned long *sysctl_local_reserved_ports;
-EXPORT_SYMBOL(sysctl_local_reserved_ports);
-
 void inet_get_local_port_range(struct net *net, int *low, int *high)
 {
        unsigned int seq;
 
        do {
-               seq = read_seqbegin(&net->ipv4.sysctl_local_ports.lock);
+               seq = read_seqbegin(&net->ipv4.ip_local_ports.lock);
 
-               *low = net->ipv4.sysctl_local_ports.range[0];
-               *high = net->ipv4.sysctl_local_ports.range[1];
-       } while (read_seqretry(&net->ipv4.sysctl_local_ports.lock, seq));
+               *low = net->ipv4.ip_local_ports.range[0];
+               *high = net->ipv4.ip_local_ports.range[1];
+       } while (read_seqretry(&net->ipv4.ip_local_ports.lock, seq));
 }
 EXPORT_SYMBOL(inet_get_local_port_range);
 
@@ -113,7 +110,7 @@ again:
 
                smallest_size = -1;
                do {
-                       if (inet_is_reserved_local_port(rover))
+                       if (inet_is_local_reserved_port(net, rover))
                                goto next_nolock;
                        head = &hashinfo->bhash[inet_bhashfn(net, rover,
                                        hashinfo->bhash_size)];
@@ -408,7 +405,7 @@ struct dst_entry *inet_csk_route_req(struct sock *sk,
        struct net *net = sock_net(sk);
        int flags = inet_sk_flowi_flags(sk);
 
-       flowi4_init_output(fl4, sk->sk_bound_dev_if, sk->sk_mark,
+       flowi4_init_output(fl4, sk->sk_bound_dev_if, ireq->ir_mark,
                           RT_CONN_FLAGS(sk), RT_SCOPE_UNIVERSE,
                           sk->sk_protocol,
                           flags,
@@ -445,7 +442,7 @@ struct dst_entry *inet_csk_route_child_sock(struct sock *sk,
 
        rcu_read_lock();
        opt = rcu_dereference(newinet->inet_opt);
-       flowi4_init_output(fl4, sk->sk_bound_dev_if, sk->sk_mark,
+       flowi4_init_output(fl4, sk->sk_bound_dev_if, inet_rsk(req)->ir_mark,
                           RT_CONN_FLAGS(sk), RT_SCOPE_UNIVERSE,
                           sk->sk_protocol, inet_sk_flowi_flags(sk),
                           (opt && opt->opt.srr) ? opt->opt.faddr : ireq->ir_rmt_addr,
@@ -680,6 +677,8 @@ struct sock *inet_csk_clone_lock(const struct sock *sk,
                inet_sk(newsk)->inet_sport = htons(inet_rsk(req)->ir_num);
                newsk->sk_write_space = sk_stream_write_space;
 
+               newsk->sk_mark = inet_rsk(req)->ir_mark;
+
                newicsk->icsk_retransmits = 0;
                newicsk->icsk_backoff     = 0;
                newicsk->icsk_probes_out  = 0;
index 8b9cf279450d6cf0c24e64a20fb0d05b9fb89a82..43116e8c8e1323cdd8d902c5b88e42187c8df991 100644 (file)
@@ -274,7 +274,7 @@ struct sock *__inet_lookup_established(struct net *net,
                                  const __be32 daddr, const u16 hnum,
                                  const int dif)
 {
-       INET_ADDR_COOKIE(acookie, saddr, daddr)
+       INET_ADDR_COOKIE(acookie, saddr, daddr);
        const __portpair ports = INET_COMBINED_PORTS(sport, hnum);
        struct sock *sk;
        const struct hlist_nulls_node *node;
@@ -327,7 +327,7 @@ static int __inet_check_established(struct inet_timewait_death_row *death_row,
        __be32 daddr = inet->inet_rcv_saddr;
        __be32 saddr = inet->inet_daddr;
        int dif = sk->sk_bound_dev_if;
-       INET_ADDR_COOKIE(acookie, saddr, daddr)
+       INET_ADDR_COOKIE(acookie, saddr, daddr);
        const __portpair ports = INET_COMBINED_PORTS(inet->inet_dport, lport);
        struct net *net = sock_net(sk);
        unsigned int hash = inet_ehashfn(net, daddr, lport,
@@ -500,7 +500,7 @@ int __inet_hash_connect(struct inet_timewait_death_row *death_row,
                local_bh_disable();
                for (i = 1; i <= remaining; i++) {
                        port = low + (i + offset) % remaining;
-                       if (inet_is_reserved_local_port(port))
+                       if (inet_is_local_reserved_port(net, port))
                                continue;
                        head = &hinfo->bhash[inet_bhashfn(net, port,
                                        hinfo->bhash_size)];
index 48f4244651125fb4ef7bcfda155137f287b07c77..c98cf141f4ed66aa57595423baf83b6fa92e19da 100644 (file)
@@ -120,7 +120,7 @@ int inet_peer_maxttl __read_mostly = 10 * 60 * HZ;  /* usual time to live: 10 min
 static void inetpeer_gc_worker(struct work_struct *work)
 {
        struct inet_peer *p, *n, *c;
-       LIST_HEAD(list);
+       struct list_head list;
 
        spin_lock_bh(&gc_lock);
        list_replace_init(&gc_list, &list);
index be8abe73bb9f464a2e68679255acde3b708ce84b..3a83ce5efa80e3fc2c062ec08465840018159b14 100644 (file)
 static bool ip_may_fragment(const struct sk_buff *skb)
 {
        return unlikely((ip_hdr(skb)->frag_off & htons(IP_DF)) == 0) ||
-              !skb->local_df;
+               skb->ignore_df;
 }
 
 static bool ip_exceeds_mtu(const struct sk_buff *skb, unsigned int mtu)
 {
-       if (skb->len <= mtu || skb->local_df)
+       if (skb->len <= mtu)
                return false;
 
        if (skb_is_gso(skb) && skb_gso_network_seglen(skb) <= mtu)
@@ -56,53 +56,6 @@ static bool ip_exceeds_mtu(const struct sk_buff *skb, unsigned int mtu)
        return true;
 }
 
-static bool ip_gso_exceeds_dst_mtu(const struct sk_buff *skb)
-{
-       unsigned int mtu;
-
-       if (skb->local_df || !skb_is_gso(skb))
-               return false;
-
-       mtu = ip_dst_mtu_maybe_forward(skb_dst(skb), true);
-
-       /* if seglen > mtu, do software segmentation for IP fragmentation on
-        * output.  DF bit cannot be set since ip_forward would have sent
-        * icmp error.
-        */
-       return skb_gso_network_seglen(skb) > mtu;
-}
-
-/* called if GSO skb needs to be fragmented on forward */
-static int ip_forward_finish_gso(struct sk_buff *skb)
-{
-       struct dst_entry *dst = skb_dst(skb);
-       netdev_features_t features;
-       struct sk_buff *segs;
-       int ret = 0;
-
-       features = netif_skb_dev_features(skb, dst->dev);
-       segs = skb_gso_segment(skb, features & ~NETIF_F_GSO_MASK);
-       if (IS_ERR(segs)) {
-               kfree_skb(skb);
-               return -ENOMEM;
-       }
-
-       consume_skb(skb);
-
-       do {
-               struct sk_buff *nskb = segs->next;
-               int err;
-
-               segs->next = NULL;
-               err = dst_output(segs);
-
-               if (err && ret == 0)
-                       ret = err;
-               segs = nskb;
-       } while (segs);
-
-       return ret;
-}
 
 static int ip_forward_finish(struct sk_buff *skb)
 {
@@ -114,9 +67,6 @@ static int ip_forward_finish(struct sk_buff *skb)
        if (unlikely(opt->optlen))
                ip_forward_options(skb);
 
-       if (ip_gso_exceeds_dst_mtu(skb))
-               return ip_forward_finish_gso(skb);
-
        return dst_output(skb);
 }
 
index c10a3ce5cbff0fc0bd0f23ac72188fd9e39fa83f..ed32313e307c43202a4710c6f5b74e14c19a4c20 100644 (file)
@@ -232,8 +232,9 @@ static void ip_expire(unsigned long arg)
                 * "Fragment Reassembly Timeout" message, per RFC792.
                 */
                if (qp->user == IP_DEFRAG_AF_PACKET ||
-                   (qp->user == IP_DEFRAG_CONNTRACK_IN &&
-                    skb_rtable(head)->rt_type != RTN_LOCAL))
+                   ((qp->user >= IP_DEFRAG_CONNTRACK_IN) &&
+                    (qp->user <= __IP_DEFRAG_CONNTRACK_IN_END) &&
+                    (skb_rtable(head)->rt_type != RTN_LOCAL)))
                        goto out_rcu_unlock;
 
 
index 94213c89156511d86682b2c3e034f9e5b6ef5ccb..c5a557a06a31ae589a0938d2d1a12073ae3706df 100644 (file)
@@ -410,7 +410,7 @@ static int ipgre_open(struct net_device *dev)
                struct flowi4 fl4;
                struct rtable *rt;
 
-               rt = ip_route_output_gre(dev_net(dev), &fl4,
+               rt = ip_route_output_gre(t->net, &fl4,
                                         t->parms.iph.daddr,
                                         t->parms.iph.saddr,
                                         t->parms.o_key,
@@ -434,7 +434,7 @@ static int ipgre_close(struct net_device *dev)
 
        if (ipv4_is_multicast(t->parms.iph.daddr) && t->mlink) {
                struct in_device *in_dev;
-               in_dev = inetdev_by_index(dev_net(dev), t->mlink);
+               in_dev = inetdev_by_index(t->net, t->mlink);
                if (in_dev)
                        ip_mc_dec_group(in_dev, t->parms.iph.daddr);
        }
@@ -478,7 +478,7 @@ static void __gre_tunnel_init(struct net_device *dev)
        dev->needed_headroom    = LL_MAX_HEADER + sizeof(struct iphdr) + 4;
        dev->mtu                = ETH_DATA_LEN - sizeof(struct iphdr) - 4;
 
-       dev->features           |= NETIF_F_NETNS_LOCAL | GRE_FEATURES;
+       dev->features           |= GRE_FEATURES;
        dev->hw_features        |= GRE_FEATURES;
 
        if (!(tunnel->parms.o_flags & TUNNEL_SEQ)) {
index f4ab72e19af923536656e734996f4b9201684b58..5e7aecea05cd2afbd3e3e13f417e26687517b468 100644 (file)
@@ -364,7 +364,7 @@ int ip_options_compile(struct net *net,
                        }
                        if (optptr[2] <= optlen) {
                                unsigned char *timeptr = NULL;
-                               if (optptr[2]+3 > optptr[1]) {
+                               if (optptr[2]+3 > optlen) {
                                        pp_ptr = optptr + 2;
                                        goto error;
                                }
@@ -376,7 +376,7 @@ int ip_options_compile(struct net *net,
                                        optptr[2] += 4;
                                        break;
                                case IPOPT_TS_TSANDADDR:
-                                       if (optptr[2]+7 > optptr[1]) {
+                                       if (optptr[2]+7 > optlen) {
                                                pp_ptr = optptr + 2;
                                                goto error;
                                        }
@@ -390,7 +390,7 @@ int ip_options_compile(struct net *net,
                                        optptr[2] += 8;
                                        break;
                                case IPOPT_TS_PRESPEC:
-                                       if (optptr[2]+7 > optptr[1]) {
+                                       if (optptr[2]+7 > optlen) {
                                                pp_ptr = optptr + 2;
                                                goto error;
                                        }
index 1cbeba5edff90fa1ac891d4dd23cfb65464878a4..6e231ab58d65d1c93ddb8ee315b070c833a8573c 100644 (file)
@@ -211,6 +211,48 @@ static inline int ip_finish_output2(struct sk_buff *skb)
        return -EINVAL;
 }
 
+static int ip_finish_output_gso(struct sk_buff *skb)
+{
+       netdev_features_t features;
+       struct sk_buff *segs;
+       int ret = 0;
+
+       /* common case: locally created skb or seglen is <= mtu */
+       if (((IPCB(skb)->flags & IPSKB_FORWARDED) == 0) ||
+             skb_gso_network_seglen(skb) <= ip_skb_dst_mtu(skb))
+               return ip_finish_output2(skb);
+
+       /* Slowpath -  GSO segment length is exceeding the dst MTU.
+        *
+        * This can happen in two cases:
+        * 1) TCP GRO packet, DF bit not set
+        * 2) skb arrived via virtio-net, we thus get TSO/GSO skbs directly
+        * from host network stack.
+        */
+       features = netif_skb_features(skb);
+       segs = skb_gso_segment(skb, features & ~NETIF_F_GSO_MASK);
+       if (IS_ERR(segs)) {
+               kfree_skb(skb);
+               return -ENOMEM;
+       }
+
+       consume_skb(skb);
+
+       do {
+               struct sk_buff *nskb = segs->next;
+               int err;
+
+               segs->next = NULL;
+               err = ip_fragment(segs, ip_finish_output2);
+
+               if (err && ret == 0)
+                       ret = err;
+               segs = nskb;
+       } while (segs);
+
+       return ret;
+}
+
 static int ip_finish_output(struct sk_buff *skb)
 {
 #if defined(CONFIG_NETFILTER) && defined(CONFIG_XFRM)
@@ -220,10 +262,13 @@ static int ip_finish_output(struct sk_buff *skb)
                return dst_output(skb);
        }
 #endif
-       if (skb->len > ip_skb_dst_mtu(skb) && !skb_is_gso(skb))
+       if (skb_is_gso(skb))
+               return ip_finish_output_gso(skb);
+
+       if (skb->len > ip_skb_dst_mtu(skb))
                return ip_fragment(skb, ip_finish_output2);
-       else
-               return ip_finish_output2(skb);
+
+       return ip_finish_output2(skb);
 }
 
 int ip_mc_output(struct sock *sk, struct sk_buff *skb)
@@ -370,7 +415,7 @@ packet_routed:
        skb_reset_network_header(skb);
        iph = ip_hdr(skb);
        *((__be16 *)iph) = htons((4 << 12) | (5 << 8) | (inet->tos & 0xff));
-       if (ip_dont_fragment(sk, &rt->dst) && !skb->local_df)
+       if (ip_dont_fragment(sk, &rt->dst) && !skb->ignore_df)
                iph->frag_off = htons(IP_DF);
        else
                iph->frag_off = 0;
@@ -456,7 +501,7 @@ int ip_fragment(struct sk_buff *skb, int (*output)(struct sk_buff *))
        iph = ip_hdr(skb);
 
        mtu = ip_skb_dst_mtu(skb);
-       if (unlikely(((iph->frag_off & htons(IP_DF)) && !skb->local_df) ||
+       if (unlikely(((iph->frag_off & htons(IP_DF)) && !skb->ignore_df) ||
                     (IPCB(skb)->frag_max_size &&
                      IPCB(skb)->frag_max_size > mtu))) {
                IP_INC_STATS(dev_net(dev), IPSTATS_MIB_FRAGFAILS);
@@ -821,7 +866,7 @@ static int __ip_append_data(struct sock *sk,
 
        fragheaderlen = sizeof(struct iphdr) + (opt ? opt->optlen : 0);
        maxfraglen = ((mtu - fragheaderlen) & ~7) + fragheaderlen;
-       maxnonfragsize = ip_sk_local_df(sk) ? 0xFFFF : mtu;
+       maxnonfragsize = ip_sk_ignore_df(sk) ? 0xFFFF : mtu;
 
        if (cork->length + length > maxnonfragsize - fragheaderlen) {
                ip_local_error(sk, EMSGSIZE, fl4->daddr, inet->inet_dport,
@@ -1144,7 +1189,7 @@ ssize_t   ip_append_page(struct sock *sk, struct flowi4 *fl4, struct page *page,
 
        fragheaderlen = sizeof(struct iphdr) + (opt ? opt->optlen : 0);
        maxfraglen = ((mtu - fragheaderlen) & ~7) + fragheaderlen;
-       maxnonfragsize = ip_sk_local_df(sk) ? 0xFFFF : mtu;
+       maxnonfragsize = ip_sk_ignore_df(sk) ? 0xFFFF : mtu;
 
        if (cork->length + size > maxnonfragsize - fragheaderlen) {
                ip_local_error(sk, EMSGSIZE, fl4->daddr, inet->inet_dport,
@@ -1305,10 +1350,10 @@ struct sk_buff *__ip_make_skb(struct sock *sk,
         * to fragment the frame generated here. No matter, what transforms
         * how transforms change size of the packet, it will come out.
         */
-       skb->local_df = ip_sk_local_df(sk);
+       skb->ignore_df = ip_sk_ignore_df(sk);
 
        /* DF bit is set when we want to see DF on outgoing frames.
-        * If local_df is set too, we still allow to fragment this frame
+        * If ignore_df is set too, we still allow to fragment this frame
         * locally. */
        if (inet->pmtudisc == IP_PMTUDISC_DO ||
            inet->pmtudisc == IP_PMTUDISC_PROBE ||
@@ -1501,7 +1546,8 @@ void ip_send_unicast_reply(struct net *net, struct sk_buff *skb, __be32 daddr,
                        daddr = replyopts.opt.opt.faddr;
        }
 
-       flowi4_init_output(&fl4, arg->bound_dev_if, 0,
+       flowi4_init_output(&fl4, arg->bound_dev_if,
+                          IP4_REPLY_MARK(net, skb->mark),
                           RT_TOS(arg->tos),
                           RT_SCOPE_UNIVERSE, ip_hdr(skb)->protocol,
                           ip_reply_arg_flowi_flags(arg),
index fa5b7519765f10c61b8855c6646b79915384062f..289c6ee388c1dad7613c30ed432d04b614e9f4e9 100644 (file)
@@ -395,11 +395,10 @@ static struct ip_tunnel *ip_tunnel_create(struct net *net,
                                          struct ip_tunnel_net *itn,
                                          struct ip_tunnel_parm *parms)
 {
-       struct ip_tunnel *nt, *fbt;
+       struct ip_tunnel *nt;
        struct net_device *dev;
 
        BUG_ON(!itn->fb_tunnel_dev);
-       fbt = netdev_priv(itn->fb_tunnel_dev);
        dev = __ip_tunnel_create(net, itn->fb_tunnel_dev->rtnl_link_ops, parms);
        if (IS_ERR(dev))
                return ERR_CAST(dev);
@@ -442,6 +441,8 @@ int ip_tunnel_rcv(struct ip_tunnel *tunnel, struct sk_buff *skb,
                tunnel->i_seqno = ntohl(tpi->seq) + 1;
        }
 
+       skb_reset_network_header(skb);
+
        err = IP_ECN_decapsulate(iph, skb);
        if (unlikely(err)) {
                if (log_ecn_error)
@@ -753,10 +754,8 @@ int ip_tunnel_ioctl(struct net_device *dev, struct ip_tunnel_parm *p, int cmd)
 
                if (!t && (cmd == SIOCADDTUNNEL)) {
                        t = ip_tunnel_create(net, itn, p);
-                       if (IS_ERR(t)) {
-                               err = PTR_ERR(t);
-                               break;
-                       }
+                       err = PTR_ERR_OR_ZERO(t);
+                       break;
                }
                if (dev != itn->fb_tunnel_dev && cmd == SIOCCHGTUNNEL) {
                        if (t != NULL) {
index 812b1835146255fe065264dd236e901fe6937578..4bc508f0db90287f09d6dcbaea1705ee8a9a1168 100644 (file)
@@ -486,4 +486,5 @@ static void __exit ipip_fini(void)
 module_init(ipip_init);
 module_exit(ipip_fini);
 MODULE_LICENSE("GPL");
+MODULE_ALIAS_RTNL_LINK("ipip");
 MODULE_ALIAS_NETDEV("tunl0");
index 12e13bd82b5bba4fdd183d5ba2cda098a1c0c683..b8f6381c7d0b15f49973a3748937ea23325bee03 100644 (file)
@@ -22,7 +22,6 @@
 #endif
 #include <net/netfilter/nf_conntrack_zones.h>
 
-/* Returns new sk_buff, or NULL */
 static int nf_ct_ipv4_gather_frags(struct sk_buff *skb, u_int32_t user)
 {
        int err;
@@ -33,8 +32,10 @@ static int nf_ct_ipv4_gather_frags(struct sk_buff *skb, u_int32_t user)
        err = ip_defrag(skb, user);
        local_bh_enable();
 
-       if (!err)
+       if (!err) {
                ip_send_check(ip_hdr(skb));
+               skb->ignore_df = 1;
+       }
 
        return err;
 }
index 8210964a9f19bedf17d6f3266c1fd0775f3de144..044a0ddf6a791ace04fbb1802e64563bf3fc5518 100644 (file)
@@ -236,15 +236,15 @@ exit:
 static void inet_get_ping_group_range_net(struct net *net, kgid_t *low,
                                          kgid_t *high)
 {
-       kgid_t *data = net->ipv4.sysctl_ping_group_range;
+       kgid_t *data = net->ipv4.ping_group_range.range;
        unsigned int seq;
 
        do {
-               seq = read_seqbegin(&net->ipv4.sysctl_local_ports.lock);
+               seq = read_seqbegin(&net->ipv4.ping_group_range.lock);
 
                *low = data[0];
                *high = data[1];
-       } while (read_seqretry(&net->ipv4.sysctl_local_ports.lock, seq));
+       } while (read_seqretry(&net->ipv4.ping_group_range.lock, seq));
 }
 
 
index ad737fad6d8b82dec74fab1260015e539647271d..ae0af9386f7ccf4cabafbcc8aff6c37309e820ea 100644 (file)
@@ -345,15 +345,15 @@ static void icmp_put(struct seq_file *seq)
        for (i = 0; icmpmibmap[i].name != NULL; i++)
                seq_printf(seq, " Out%s", icmpmibmap[i].name);
        seq_printf(seq, "\nIcmp: %lu %lu %lu",
-               snmp_fold_field((void __percpu **) net->mib.icmp_statistics, ICMP_MIB_INMSGS),
-               snmp_fold_field((void __percpu **) net->mib.icmp_statistics, ICMP_MIB_INERRORS),
-               snmp_fold_field((void __percpu **) net->mib.icmp_statistics, ICMP_MIB_CSUMERRORS));
+               snmp_fold_field(net->mib.icmp_statistics, ICMP_MIB_INMSGS),
+               snmp_fold_field(net->mib.icmp_statistics, ICMP_MIB_INERRORS),
+               snmp_fold_field(net->mib.icmp_statistics, ICMP_MIB_CSUMERRORS));
        for (i = 0; icmpmibmap[i].name != NULL; i++)
                seq_printf(seq, " %lu",
                           atomic_long_read(ptr + icmpmibmap[i].index));
        seq_printf(seq, " %lu %lu",
-               snmp_fold_field((void __percpu **) net->mib.icmp_statistics, ICMP_MIB_OUTMSGS),
-               snmp_fold_field((void __percpu **) net->mib.icmp_statistics, ICMP_MIB_OUTERRORS));
+               snmp_fold_field(net->mib.icmp_statistics, ICMP_MIB_OUTMSGS),
+               snmp_fold_field(net->mib.icmp_statistics, ICMP_MIB_OUTERRORS));
        for (i = 0; icmpmibmap[i].name != NULL; i++)
                seq_printf(seq, " %lu",
                           atomic_long_read(ptr + (icmpmibmap[i].index | 0x100)));
@@ -379,7 +379,7 @@ static int snmp_seq_show(struct seq_file *seq, void *v)
        BUILD_BUG_ON(offsetof(struct ipstats_mib, mibs) != 0);
        for (i = 0; snmp4_ipstats_list[i].name != NULL; i++)
                seq_printf(seq, " %llu",
-                          snmp_fold_field64((void __percpu **)net->mib.ip_statistics,
+                          snmp_fold_field64(net->mib.ip_statistics,
                                             snmp4_ipstats_list[i].entry,
                                             offsetof(struct ipstats_mib, syncp)));
 
@@ -395,11 +395,11 @@ static int snmp_seq_show(struct seq_file *seq, void *v)
                /* MaxConn field is signed, RFC 2012 */
                if (snmp4_tcp_list[i].entry == TCP_MIB_MAXCONN)
                        seq_printf(seq, " %ld",
-                                  snmp_fold_field((void __percpu **)net->mib.tcp_statistics,
+                                  snmp_fold_field(net->mib.tcp_statistics,
                                                   snmp4_tcp_list[i].entry));
                else
                        seq_printf(seq, " %lu",
-                                  snmp_fold_field((void __percpu **)net->mib.tcp_statistics,
+                                  snmp_fold_field(net->mib.tcp_statistics,
                                                   snmp4_tcp_list[i].entry));
        }
 
@@ -410,7 +410,7 @@ static int snmp_seq_show(struct seq_file *seq, void *v)
        seq_puts(seq, "\nUdp:");
        for (i = 0; snmp4_udp_list[i].name != NULL; i++)
                seq_printf(seq, " %lu",
-                          snmp_fold_field((void __percpu **)net->mib.udp_statistics,
+                          snmp_fold_field(net->mib.udp_statistics,
                                           snmp4_udp_list[i].entry));
 
        /* the UDP and UDP-Lite MIBs are the same */
@@ -421,7 +421,7 @@ static int snmp_seq_show(struct seq_file *seq, void *v)
        seq_puts(seq, "\nUdpLite:");
        for (i = 0; snmp4_udp_list[i].name != NULL; i++)
                seq_printf(seq, " %lu",
-                          snmp_fold_field((void __percpu **)net->mib.udplite_statistics,
+                          snmp_fold_field(net->mib.udplite_statistics,
                                           snmp4_udp_list[i].entry));
 
        seq_putc(seq, '\n');
@@ -458,7 +458,7 @@ static int netstat_seq_show(struct seq_file *seq, void *v)
        seq_puts(seq, "\nTcpExt:");
        for (i = 0; snmp4_net_list[i].name != NULL; i++)
                seq_printf(seq, " %lu",
-                          snmp_fold_field((void __percpu **)net->mib.net_statistics,
+                          snmp_fold_field(net->mib.net_statistics,
                                           snmp4_net_list[i].entry));
 
        seq_puts(seq, "\nIpExt:");
@@ -468,7 +468,7 @@ static int netstat_seq_show(struct seq_file *seq, void *v)
        seq_puts(seq, "\nIpExt:");
        for (i = 0; snmp4_ipextstats_list[i].name != NULL; i++)
                seq_printf(seq, " %llu",
-                          snmp_fold_field64((void __percpu **)net->mib.ip_statistics,
+                          snmp_fold_field64(net->mib.ip_statistics,
                                             snmp4_ipextstats_list[i].entry,
                                             offsetof(struct ipstats_mib, syncp)));
 
index db1e0da871f40a2284d67bd48c0f21d772b923f3..50e1e0feddfcdfcc69d641f27a2bd3596bd56098 100644 (file)
@@ -993,6 +993,9 @@ void ipv4_update_pmtu(struct sk_buff *skb, struct net *net, u32 mtu,
        struct flowi4 fl4;
        struct rtable *rt;
 
+       if (!mark)
+               mark = IP4_REPLY_MARK(net, skb->mark);
+
        __build_flow_key(&fl4, NULL, iph, oif,
                         RT_TOS(iph->tos), protocol, mark, flow_flags);
        rt = __ip_route_output_key(net, &fl4);
@@ -1010,6 +1013,10 @@ static void __ipv4_sk_update_pmtu(struct sk_buff *skb, struct sock *sk, u32 mtu)
        struct rtable *rt;
 
        __build_flow_key(&fl4, sk, iph, 0, 0, 0, 0, 0);
+
+       if (!fl4.flowi4_mark)
+               fl4.flowi4_mark = IP4_REPLY_MARK(sock_net(sk), skb->mark);
+
        rt = __ip_route_output_key(sock_net(sk), &fl4);
        if (!IS_ERR(rt)) {
                __ip_rt_update_pmtu(rt, &fl4, mtu);
index f2ed13c2125f7d34820c9e92a3080678f30f46fd..c86624b36a62ece1dd34bf39561d52e34f467bd3 100644 (file)
@@ -303,6 +303,7 @@ struct sock *cookie_v4_check(struct sock *sk, struct sk_buff *skb,
        ireq->ir_rmt_port       = th->source;
        ireq->ir_loc_addr       = ip_hdr(skb)->daddr;
        ireq->ir_rmt_addr       = ip_hdr(skb)->saddr;
+       ireq->ir_mark           = inet_request_mark(sk, skb);
        ireq->ecn_ok            = ecn_ok;
        ireq->snd_wscale        = tcp_opt.snd_wscale;
        ireq->sack_ok           = tcp_opt.sack_ok;
@@ -339,7 +340,7 @@ struct sock *cookie_v4_check(struct sock *sk, struct sk_buff *skb,
         * hasn't changed since we received the original syn, but I see
         * no easy way to do this.
         */
-       flowi4_init_output(&fl4, sk->sk_bound_dev_if, sk->sk_mark,
+       flowi4_init_output(&fl4, sk->sk_bound_dev_if, ireq->ir_mark,
                           RT_CONN_FLAGS(sk), RT_SCOPE_UNIVERSE, IPPROTO_TCP,
                           inet_sk_flowi_flags(sk),
                           (opt && opt->srr) ? opt->faddr : ireq->ir_rmt_addr,
index 44eba052b43d3ab49ba7630bcd82e73e5b094472..79a007c5255883f9d96011682c67ea8aac15a835 100644 (file)
@@ -45,10 +45,10 @@ static int ip_ping_group_range_max[] = { GID_T_MAX, GID_T_MAX };
 /* Update system visible IP port range */
 static void set_local_port_range(struct net *net, int range[2])
 {
-       write_seqlock(&net->ipv4.sysctl_local_ports.lock);
-       net->ipv4.sysctl_local_ports.range[0] = range[0];
-       net->ipv4.sysctl_local_ports.range[1] = range[1];
-       write_sequnlock(&net->ipv4.sysctl_local_ports.lock);
+       write_seqlock(&net->ipv4.ip_local_ports.lock);
+       net->ipv4.ip_local_ports.range[0] = range[0];
+       net->ipv4.ip_local_ports.range[1] = range[1];
+       write_sequnlock(&net->ipv4.ip_local_ports.lock);
 }
 
 /* Validate changes from /proc interface. */
@@ -57,7 +57,7 @@ static int ipv4_local_port_range(struct ctl_table *table, int write,
                                 size_t *lenp, loff_t *ppos)
 {
        struct net *net =
-               container_of(table->data, struct net, ipv4.sysctl_local_ports.range);
+               container_of(table->data, struct net, ipv4.ip_local_ports.range);
        int ret;
        int range[2];
        struct ctl_table tmp = {
@@ -87,14 +87,14 @@ static void inet_get_ping_group_range_table(struct ctl_table *table, kgid_t *low
 {
        kgid_t *data = table->data;
        struct net *net =
-               container_of(table->data, struct net, ipv4.sysctl_ping_group_range);
+               container_of(table->data, struct net, ipv4.ping_group_range.range);
        unsigned int seq;
        do {
-               seq = read_seqbegin(&net->ipv4.sysctl_local_ports.lock);
+               seq = read_seqbegin(&net->ipv4.ip_local_ports.lock);
 
                *low = data[0];
                *high = data[1];
-       } while (read_seqretry(&net->ipv4.sysctl_local_ports.lock, seq));
+       } while (read_seqretry(&net->ipv4.ip_local_ports.lock, seq));
 }
 
 /* Update system visible IP port range */
@@ -102,11 +102,11 @@ static void set_ping_group_range(struct ctl_table *table, kgid_t low, kgid_t hig
 {
        kgid_t *data = table->data;
        struct net *net =
-               container_of(table->data, struct net, ipv4.sysctl_ping_group_range);
-       write_seqlock(&net->ipv4.sysctl_local_ports.lock);
+               container_of(table->data, struct net, ipv4.ping_group_range.range);
+       write_seqlock(&net->ipv4.ip_local_ports.lock);
        data[0] = low;
        data[1] = high;
-       write_sequnlock(&net->ipv4.sysctl_local_ports.lock);
+       write_sequnlock(&net->ipv4.ip_local_ports.lock);
 }
 
 /* Validate changes from /proc interface. */
@@ -436,13 +436,6 @@ static struct ctl_table ipv4_table[] = {
                .mode           = 0644,
                .proc_handler   = proc_dointvec
        },
-       {
-               .procname       = "ip_local_reserved_ports",
-               .data           = NULL, /* initialized in sysctl_ipv4_init */
-               .maxlen         = 65536,
-               .mode           = 0644,
-               .proc_handler   = proc_do_large_bitmap,
-       },
        {
                .procname       = "igmp_max_memberships",
                .data           = &sysctl_igmp_max_memberships,
@@ -805,7 +798,7 @@ static struct ctl_table ipv4_net_table[] = {
        },
        {
                .procname       = "ping_group_range",
-               .data           = &init_net.ipv4.sysctl_ping_group_range,
+               .data           = &init_net.ipv4.ping_group_range.range,
                .maxlen         = sizeof(gid_t)*2,
                .mode           = 0644,
                .proc_handler   = ipv4_ping_group_range,
@@ -819,11 +812,18 @@ static struct ctl_table ipv4_net_table[] = {
        },
        {
                .procname       = "ip_local_port_range",
-               .maxlen         = sizeof(init_net.ipv4.sysctl_local_ports.range),
-               .data           = &init_net.ipv4.sysctl_local_ports.range,
+               .maxlen         = sizeof(init_net.ipv4.ip_local_ports.range),
+               .data           = &init_net.ipv4.ip_local_ports.range,
                .mode           = 0644,
                .proc_handler   = ipv4_local_port_range,
        },
+       {
+               .procname       = "ip_local_reserved_ports",
+               .data           = &init_net.ipv4.sysctl_local_reserved_ports,
+               .maxlen         = 65536,
+               .mode           = 0644,
+               .proc_handler   = proc_do_large_bitmap,
+       },
        {
                .procname       = "ip_no_pmtu_disc",
                .data           = &init_net.ipv4.sysctl_ip_no_pmtu_disc,
@@ -838,6 +838,20 @@ static struct ctl_table ipv4_net_table[] = {
                .mode           = 0644,
                .proc_handler   = proc_dointvec,
        },
+       {
+               .procname       = "fwmark_reflect",
+               .data           = &init_net.ipv4.sysctl_fwmark_reflect,
+               .maxlen         = sizeof(int),
+               .mode           = 0644,
+               .proc_handler   = proc_dointvec,
+       },
+       {
+               .procname       = "tcp_fwmark_accept",
+               .data           = &init_net.ipv4.sysctl_tcp_fwmark_accept,
+               .maxlen         = sizeof(int),
+               .mode           = 0644,
+               .proc_handler   = proc_dointvec,
+       },
        { }
 };
 
@@ -858,26 +872,18 @@ static __net_init int ipv4_sysctl_init_net(struct net *net)
                        table[i].data += (void *)net - (void *)&init_net;
        }
 
-       /*
-        * Sane defaults - nobody may create ping sockets.
-        * Boot scripts should set this to distro-specific group.
-        */
-       net->ipv4.sysctl_ping_group_range[0] = make_kgid(&init_user_ns, 1);
-       net->ipv4.sysctl_ping_group_range[1] = make_kgid(&init_user_ns, 0);
-
-       /*
-        * Set defaults for local port range
-        */
-       seqlock_init(&net->ipv4.sysctl_local_ports.lock);
-       net->ipv4.sysctl_local_ports.range[0] =  32768;
-       net->ipv4.sysctl_local_ports.range[1] =  61000;
-
        net->ipv4.ipv4_hdr = register_net_sysctl(net, "net/ipv4", table);
        if (net->ipv4.ipv4_hdr == NULL)
                goto err_reg;
 
+       net->ipv4.sysctl_local_reserved_ports = kzalloc(65536 / 8, GFP_KERNEL);
+       if (!net->ipv4.sysctl_local_reserved_ports)
+               goto err_ports;
+
        return 0;
 
+err_ports:
+       unregister_net_sysctl_table(net->ipv4.ipv4_hdr);
 err_reg:
        if (!net_eq(net, &init_net))
                kfree(table);
@@ -889,6 +895,7 @@ static __net_exit void ipv4_sysctl_exit_net(struct net *net)
 {
        struct ctl_table *table;
 
+       kfree(net->ipv4.sysctl_local_reserved_ports);
        table = net->ipv4.ipv4_hdr->ctl_table_arg;
        unregister_net_sysctl_table(net->ipv4.ipv4_hdr);
        kfree(table);
@@ -902,16 +909,6 @@ static __net_initdata struct pernet_operations ipv4_sysctl_ops = {
 static __init int sysctl_ipv4_init(void)
 {
        struct ctl_table_header *hdr;
-       struct ctl_table *i;
-
-       for (i = ipv4_table; i->procname; i++) {
-               if (strcmp(i->procname, "ip_local_reserved_ports") == 0) {
-                       i->data = sysctl_local_reserved_ports;
-                       break;
-               }
-       }
-       if (!i->procname)
-               return -EINVAL;
 
        hdr = register_net_sysctl(&init_net, "net/ipv4", ipv4_table);
        if (hdr == NULL)
index 821846fb0a7e211fc870b1afce5991bc3de28494..d5de69bc04f581ac589ae0f6fe7fcc3646b2cc3e 100644 (file)
@@ -140,13 +140,12 @@ static inline void bictcp_update(struct bictcp *ca, u32 cwnd)
                ca->cnt = 1;
 }
 
-static void bictcp_cong_avoid(struct sock *sk, u32 ack, u32 acked,
-                             u32 in_flight)
+static void bictcp_cong_avoid(struct sock *sk, u32 ack, u32 acked)
 {
        struct tcp_sock *tp = tcp_sk(sk);
        struct bictcp *ca = inet_csk_ca(sk);
 
-       if (!tcp_is_cwnd_limited(sk, in_flight))
+       if (!tcp_is_cwnd_limited(sk))
                return;
 
        if (tp->snd_cwnd <= tp->snd_ssthresh)
index 2b9464c93b8859fcbef0f900f70a4bed2dc6e617..7b09d8b49fa51271cc0fa99a3fcda9f017c0f469 100644 (file)
@@ -276,26 +276,6 @@ int tcp_set_congestion_control(struct sock *sk, const char *name)
        return err;
 }
 
-/* RFC2861 Check whether we are limited by application or congestion window
- * This is the inverse of cwnd check in tcp_tso_should_defer
- */
-bool tcp_is_cwnd_limited(const struct sock *sk, u32 in_flight)
-{
-       const struct tcp_sock *tp = tcp_sk(sk);
-       u32 left;
-
-       if (in_flight >= tp->snd_cwnd)
-               return true;
-
-       left = tp->snd_cwnd - in_flight;
-       if (sk_can_gso(sk) &&
-           left * sysctl_tcp_tso_win_divisor < tp->snd_cwnd &&
-           left < tp->xmit_size_goal_segs)
-               return true;
-       return left <= tcp_max_tso_deferred_mss(tp);
-}
-EXPORT_SYMBOL_GPL(tcp_is_cwnd_limited);
-
 /* Slow start is used when congestion window is no greater than the slow start
  * threshold. We base on RFC2581 and also handle stretch ACKs properly.
  * We do not implement RFC3465 Appropriate Byte Counting (ABC) per se but
@@ -337,11 +317,11 @@ EXPORT_SYMBOL_GPL(tcp_cong_avoid_ai);
 /* This is Jacobson's slow start and congestion avoidance.
  * SIGCOMM '88, p. 328.
  */
-void tcp_reno_cong_avoid(struct sock *sk, u32 ack, u32 acked, u32 in_flight)
+void tcp_reno_cong_avoid(struct sock *sk, u32 ack, u32 acked)
 {
        struct tcp_sock *tp = tcp_sk(sk);
 
-       if (!tcp_is_cwnd_limited(sk, in_flight))
+       if (!tcp_is_cwnd_limited(sk))
                return;
 
        /* In "safe" area, increase. */
index 8bf224516ba2a26a661d16f89aaee32301d09397..a9bd8a4828a9e5c2da275626c11c6c953a1b3dd6 100644 (file)
@@ -304,13 +304,12 @@ static inline void bictcp_update(struct bictcp *ca, u32 cwnd)
                ca->cnt = 1;
 }
 
-static void bictcp_cong_avoid(struct sock *sk, u32 ack, u32 acked,
-                             u32 in_flight)
+static void bictcp_cong_avoid(struct sock *sk, u32 ack, u32 acked)
 {
        struct tcp_sock *tp = tcp_sk(sk);
        struct bictcp *ca = inet_csk_ca(sk);
 
-       if (!tcp_is_cwnd_limited(sk, in_flight))
+       if (!tcp_is_cwnd_limited(sk))
                return;
 
        if (tp->snd_cwnd <= tp->snd_ssthresh) {
@@ -409,7 +408,7 @@ static void bictcp_acked(struct sock *sk, u32 cnt, s32 rtt_us)
                ratio -= ca->delayed_ack >> ACK_RATIO_SHIFT;
                ratio += cnt;
 
-               ca->delayed_ack = min(ratio, ACK_RATIO_LIMIT);
+               ca->delayed_ack = clamp(ratio, 1U, ACK_RATIO_LIMIT);
        }
 
        /* Some calls are for duplicates without timetamps */
index f195d9316e55d3d7ef6dc5285606ea9de0f52a51..62e48cf84e602a005ab2ce61c058ca709702098a 100644 (file)
@@ -72,25 +72,224 @@ error:             kfree(ctx);
        return err;
 }
 
-/* Computes the fastopen cookie for the IP path.
- * The path is a 128 bits long (pad with zeros for IPv4).
- *
- * The caller must check foc->len to determine if a valid cookie
- * has been generated successfully.
-*/
-void tcp_fastopen_cookie_gen(__be32 src, __be32 dst,
-                            struct tcp_fastopen_cookie *foc)
+static bool __tcp_fastopen_cookie_gen(const void *path,
+                                     struct tcp_fastopen_cookie *foc)
 {
-       __be32 path[4] = { src, dst, 0, 0 };
        struct tcp_fastopen_context *ctx;
+       bool ok = false;
 
        tcp_fastopen_init_key_once(true);
 
        rcu_read_lock();
        ctx = rcu_dereference(tcp_fastopen_ctx);
        if (ctx) {
-               crypto_cipher_encrypt_one(ctx->tfm, foc->val, (__u8 *)path);
+               crypto_cipher_encrypt_one(ctx->tfm, foc->val, path);
                foc->len = TCP_FASTOPEN_COOKIE_SIZE;
+               ok = true;
        }
        rcu_read_unlock();
+       return ok;
+}
+
+/* Generate the fastopen cookie by doing aes128 encryption on both
+ * the source and destination addresses. Pad 0s for IPv4 or IPv4-mapped-IPv6
+ * addresses. For the longer IPv6 addresses use CBC-MAC.
+ *
+ * XXX (TFO) - refactor when TCP_FASTOPEN_COOKIE_SIZE != AES_BLOCK_SIZE.
+ */
+static bool tcp_fastopen_cookie_gen(struct request_sock *req,
+                                   struct sk_buff *syn,
+                                   struct tcp_fastopen_cookie *foc)
+{
+       if (req->rsk_ops->family == AF_INET) {
+               const struct iphdr *iph = ip_hdr(syn);
+
+               __be32 path[4] = { iph->saddr, iph->daddr, 0, 0 };
+               return __tcp_fastopen_cookie_gen(path, foc);
+       }
+
+#if IS_ENABLED(CONFIG_IPV6)
+       if (req->rsk_ops->family == AF_INET6) {
+               const struct ipv6hdr *ip6h = ipv6_hdr(syn);
+               struct tcp_fastopen_cookie tmp;
+
+               if (__tcp_fastopen_cookie_gen(&ip6h->saddr, &tmp)) {
+                       struct in6_addr *buf = (struct in6_addr *) tmp.val;
+                       int i = 4;
+
+                       for (i = 0; i < 4; i++)
+                               buf->s6_addr32[i] ^= ip6h->daddr.s6_addr32[i];
+                       return __tcp_fastopen_cookie_gen(buf, foc);
+               }
+       }
+#endif
+       return false;
+}
+
+static bool tcp_fastopen_create_child(struct sock *sk,
+                                     struct sk_buff *skb,
+                                     struct dst_entry *dst,
+                                     struct request_sock *req)
+{
+       struct tcp_sock *tp = tcp_sk(sk);
+       struct request_sock_queue *queue = &inet_csk(sk)->icsk_accept_queue;
+       struct sock *child;
+
+       req->num_retrans = 0;
+       req->num_timeout = 0;
+       req->sk = NULL;
+
+       child = inet_csk(sk)->icsk_af_ops->syn_recv_sock(sk, skb, req, NULL);
+       if (child == NULL)
+               return false;
+
+       spin_lock(&queue->fastopenq->lock);
+       queue->fastopenq->qlen++;
+       spin_unlock(&queue->fastopenq->lock);
+
+       /* Initialize the child socket. Have to fix some values to take
+        * into account the child is a Fast Open socket and is created
+        * only out of the bits carried in the SYN packet.
+        */
+       tp = tcp_sk(child);
+
+       tp->fastopen_rsk = req;
+       /* Do a hold on the listner sk so that if the listener is being
+        * closed, the child that has been accepted can live on and still
+        * access listen_lock.
+        */
+       sock_hold(sk);
+       tcp_rsk(req)->listener = sk;
+
+       /* RFC1323: The window in SYN & SYN/ACK segments is never
+        * scaled. So correct it appropriately.
+        */
+       tp->snd_wnd = ntohs(tcp_hdr(skb)->window);
+
+       /* Activate the retrans timer so that SYNACK can be retransmitted.
+        * The request socket is not added to the SYN table of the parent
+        * because it's been added to the accept queue directly.
+        */
+       inet_csk_reset_xmit_timer(child, ICSK_TIME_RETRANS,
+                                 TCP_TIMEOUT_INIT, TCP_RTO_MAX);
+
+       /* Add the child socket directly into the accept queue */
+       inet_csk_reqsk_queue_add(sk, req, child);
+
+       /* Now finish processing the fastopen child socket. */
+       inet_csk(child)->icsk_af_ops->rebuild_header(child);
+       tcp_init_congestion_control(child);
+       tcp_mtup_init(child);
+       tcp_init_metrics(child);
+       tcp_init_buffer_space(child);
+
+       /* Queue the data carried in the SYN packet. We need to first
+        * bump skb's refcnt because the caller will attempt to free it.
+        *
+        * XXX (TFO) - we honor a zero-payload TFO request for now,
+        * (any reason not to?) but no need to queue the skb since
+        * there is no data. How about SYN+FIN?
+        */
+       if (TCP_SKB_CB(skb)->end_seq != TCP_SKB_CB(skb)->seq + 1) {
+               skb = skb_get(skb);
+               skb_dst_drop(skb);
+               __skb_pull(skb, tcp_hdr(skb)->doff * 4);
+               skb_set_owner_r(skb, child);
+               __skb_queue_tail(&child->sk_receive_queue, skb);
+               tp->syn_data_acked = 1;
+       }
+       tcp_rsk(req)->rcv_nxt = tp->rcv_nxt = TCP_SKB_CB(skb)->end_seq;
+       sk->sk_data_ready(sk);
+       bh_unlock_sock(child);
+       sock_put(child);
+       WARN_ON(req->sk == NULL);
+       return true;
+}
+EXPORT_SYMBOL(tcp_fastopen_create_child);
+
+static bool tcp_fastopen_queue_check(struct sock *sk)
+{
+       struct fastopen_queue *fastopenq;
+
+       /* Make sure the listener has enabled fastopen, and we don't
+        * exceed the max # of pending TFO requests allowed before trying
+        * to validating the cookie in order to avoid burning CPU cycles
+        * unnecessarily.
+        *
+        * XXX (TFO) - The implication of checking the max_qlen before
+        * processing a cookie request is that clients can't differentiate
+        * between qlen overflow causing Fast Open to be disabled
+        * temporarily vs a server not supporting Fast Open at all.
+        */
+       fastopenq = inet_csk(sk)->icsk_accept_queue.fastopenq;
+       if (fastopenq == NULL || fastopenq->max_qlen == 0)
+               return false;
+
+       if (fastopenq->qlen >= fastopenq->max_qlen) {
+               struct request_sock *req1;
+               spin_lock(&fastopenq->lock);
+               req1 = fastopenq->rskq_rst_head;
+               if ((req1 == NULL) || time_after(req1->expires, jiffies)) {
+                       spin_unlock(&fastopenq->lock);
+                       NET_INC_STATS_BH(sock_net(sk),
+                                        LINUX_MIB_TCPFASTOPENLISTENOVERFLOW);
+                       return false;
+               }
+               fastopenq->rskq_rst_head = req1->dl_next;
+               fastopenq->qlen--;
+               spin_unlock(&fastopenq->lock);
+               reqsk_free(req1);
+       }
+       return true;
+}
+
+/* Returns true if we should perform Fast Open on the SYN. The cookie (foc)
+ * may be updated and return the client in the SYN-ACK later. E.g., Fast Open
+ * cookie request (foc->len == 0).
+ */
+bool tcp_try_fastopen(struct sock *sk, struct sk_buff *skb,
+                     struct request_sock *req,
+                     struct tcp_fastopen_cookie *foc,
+                     struct dst_entry *dst)
+{
+       struct tcp_fastopen_cookie valid_foc = { .len = -1 };
+       bool syn_data = TCP_SKB_CB(skb)->end_seq != TCP_SKB_CB(skb)->seq + 1;
+
+       if (!((sysctl_tcp_fastopen & TFO_SERVER_ENABLE) &&
+             (syn_data || foc->len >= 0) &&
+             tcp_fastopen_queue_check(sk))) {
+               foc->len = -1;
+               return false;
+       }
+
+       if (syn_data && (sysctl_tcp_fastopen & TFO_SERVER_COOKIE_NOT_REQD))
+               goto fastopen;
+
+       if (tcp_fastopen_cookie_gen(req, skb, &valid_foc) &&
+           foc->len == TCP_FASTOPEN_COOKIE_SIZE &&
+           foc->len == valid_foc.len &&
+           !memcmp(foc->val, valid_foc.val, foc->len)) {
+               /* Cookie is valid. Create a (full) child socket to accept
+                * the data in SYN before returning a SYN-ACK to ack the
+                * data. If we fail to create the socket, fall back and
+                * ack the ISN only but includes the same cookie.
+                *
+                * Note: Data-less SYN with valid cookie is allowed to send
+                * data in SYN_RECV state.
+                */
+fastopen:
+               if (tcp_fastopen_create_child(sk, skb, dst, req)) {
+                       foc->len = -1;
+                       NET_INC_STATS_BH(sock_net(sk),
+                                        LINUX_MIB_TCPFASTOPENPASSIVE);
+                       return true;
+               }
+       }
+
+       NET_INC_STATS_BH(sock_net(sk), foc->len ?
+                        LINUX_MIB_TCPFASTOPENPASSIVEFAIL :
+                        LINUX_MIB_TCPFASTOPENCOOKIEREQD);
+       *foc = valid_foc;
+       return false;
 }
+EXPORT_SYMBOL(tcp_try_fastopen);
index 8b9e7bad77c09a0c07706b955c29b81d4e71a542..1c4908280d921fbe9a9e21e4fd55d1a87f2db706 100644 (file)
@@ -109,12 +109,12 @@ static void hstcp_init(struct sock *sk)
        tp->snd_cwnd_clamp = min_t(u32, tp->snd_cwnd_clamp, 0xffffffff/128);
 }
 
-static void hstcp_cong_avoid(struct sock *sk, u32 ack, u32 acked, u32 in_flight)
+static void hstcp_cong_avoid(struct sock *sk, u32 ack, u32 acked)
 {
        struct tcp_sock *tp = tcp_sk(sk);
        struct hstcp *ca = inet_csk_ca(sk);
 
-       if (!tcp_is_cwnd_limited(sk, in_flight))
+       if (!tcp_is_cwnd_limited(sk))
                return;
 
        if (tp->snd_cwnd <= tp->snd_ssthresh)
index 4a194acfd9237f1aaadfe9f9831358f5c9037cf7..031361311a8b92b1f7ab1172fb00e3bbb0503129 100644 (file)
@@ -227,12 +227,12 @@ static u32 htcp_recalc_ssthresh(struct sock *sk)
        return max((tp->snd_cwnd * ca->beta) >> 7, 2U);
 }
 
-static void htcp_cong_avoid(struct sock *sk, u32 ack, u32 acked, u32 in_flight)
+static void htcp_cong_avoid(struct sock *sk, u32 ack, u32 acked)
 {
        struct tcp_sock *tp = tcp_sk(sk);
        struct htcp *ca = inet_csk_ca(sk);
 
-       if (!tcp_is_cwnd_limited(sk, in_flight))
+       if (!tcp_is_cwnd_limited(sk))
                return;
 
        if (tp->snd_cwnd <= tp->snd_ssthresh)
index a15a799bf76888f3633b27cf57e8e05f30339601..d8f8f05a49516ec2d9213f10af77b82fbf32bff7 100644 (file)
@@ -87,8 +87,7 @@ static inline u32 hybla_fraction(u32 odds)
  *     o Give cwnd a new value based on the model proposed
  *     o remember increments <1
  */
-static void hybla_cong_avoid(struct sock *sk, u32 ack, u32 acked,
-                            u32 in_flight)
+static void hybla_cong_avoid(struct sock *sk, u32 ack, u32 acked)
 {
        struct tcp_sock *tp = tcp_sk(sk);
        struct hybla *ca = inet_csk_ca(sk);
@@ -101,11 +100,11 @@ static void hybla_cong_avoid(struct sock *sk, u32 ack, u32 acked,
                ca->minrtt_us = tp->srtt_us;
        }
 
-       if (!tcp_is_cwnd_limited(sk, in_flight))
+       if (!tcp_is_cwnd_limited(sk))
                return;
 
        if (!ca->hybla_en) {
-               tcp_reno_cong_avoid(sk, ack, acked, in_flight);
+               tcp_reno_cong_avoid(sk, ack, acked);
                return;
        }
 
index 863d105e30150391e9ac3ce8545c0411e4d083ab..5999b3972e6449d616facb628d27116ab8876ac2 100644 (file)
@@ -255,8 +255,7 @@ static void tcp_illinois_state(struct sock *sk, u8 new_state)
 /*
  * Increase window in response to successful acknowledgment.
  */
-static void tcp_illinois_cong_avoid(struct sock *sk, u32 ack, u32 acked,
-                                   u32 in_flight)
+static void tcp_illinois_cong_avoid(struct sock *sk, u32 ack, u32 acked)
 {
        struct tcp_sock *tp = tcp_sk(sk);
        struct illinois *ca = inet_csk_ca(sk);
@@ -265,7 +264,7 @@ static void tcp_illinois_cong_avoid(struct sock *sk, u32 ack, u32 acked,
                update_params(sk);
 
        /* RFC2861 only increase cwnd if fully utilized */
-       if (!tcp_is_cwnd_limited(sk, in_flight))
+       if (!tcp_is_cwnd_limited(sk))
                return;
 
        /* In slow start */
index 6efed134ab63dc68ccd069acb2c4e4345cdc6a99..350b2072f0ab82776f196dfbe9c5b87b80a79046 100644 (file)
@@ -2938,10 +2938,11 @@ static void tcp_synack_rtt_meas(struct sock *sk, const u32 synack_stamp)
                tcp_ack_update_rtt(sk, FLAG_SYN_ACKED, seq_rtt_us, -1L);
 }
 
-static void tcp_cong_avoid(struct sock *sk, u32 ack, u32 acked, u32 in_flight)
+static void tcp_cong_avoid(struct sock *sk, u32 ack, u32 acked)
 {
        const struct inet_connection_sock *icsk = inet_csk(sk);
-       icsk->icsk_ca_ops->cong_avoid(sk, ack, acked, in_flight);
+
+       icsk->icsk_ca_ops->cong_avoid(sk, ack, acked);
        tcp_sk(sk)->snd_cwnd_stamp = tcp_time_stamp;
 }
 
@@ -3364,7 +3365,6 @@ static int tcp_ack(struct sock *sk, const struct sk_buff *skb, int flag)
        u32 ack_seq = TCP_SKB_CB(skb)->seq;
        u32 ack = TCP_SKB_CB(skb)->ack_seq;
        bool is_dupack = false;
-       u32 prior_in_flight;
        u32 prior_fackets;
        int prior_packets = tp->packets_out;
        const int prior_unsacked = tp->packets_out - tp->sacked_out;
@@ -3397,7 +3397,6 @@ static int tcp_ack(struct sock *sk, const struct sk_buff *skb, int flag)
                flag |= FLAG_SND_UNA_ADVANCED;
 
        prior_fackets = tp->fackets_out;
-       prior_in_flight = tcp_packets_in_flight(tp);
 
        /* ts_recent update must be made after we are sure that the packet
         * is in window.
@@ -3452,7 +3451,7 @@ static int tcp_ack(struct sock *sk, const struct sk_buff *skb, int flag)
 
        /* Advance cwnd if state allows */
        if (tcp_may_raise_cwnd(sk, flag))
-               tcp_cong_avoid(sk, ack, acked, prior_in_flight);
+               tcp_cong_avoid(sk, ack, acked);
 
        if (tcp_ack_is_dubious(sk, flag)) {
                is_dupack = !(flag & (FLAG_SND_UNA_ADVANCED | FLAG_NOT_DUP));
index 438f3b95143df0bffa01322c62b44c8fcd6d6b8b..77cccda1ad0c6dc62c8cb70d932eca2322304c81 100644 (file)
@@ -336,8 +336,8 @@ void tcp_v4_err(struct sk_buff *icmp_skb, u32 info)
        const int code = icmp_hdr(icmp_skb)->code;
        struct sock *sk;
        struct sk_buff *skb;
-       struct request_sock *req;
-       __u32 seq;
+       struct request_sock *fastopen;
+       __u32 seq, snd_una;
        __u32 remaining;
        int err;
        struct net *net = dev_net(icmp_skb->dev);
@@ -378,12 +378,12 @@ void tcp_v4_err(struct sk_buff *icmp_skb, u32 info)
 
        icsk = inet_csk(sk);
        tp = tcp_sk(sk);
-       req = tp->fastopen_rsk;
        seq = ntohl(th->seq);
+       /* XXX (TFO) - tp->snd_una should be ISN (tcp_create_openreq_child()) */
+       fastopen = tp->fastopen_rsk;
+       snd_una = fastopen ? tcp_rsk(fastopen)->snt_isn : tp->snd_una;
        if (sk->sk_state != TCP_LISTEN &&
-           !between(seq, tp->snd_una, tp->snd_nxt) &&
-           (req == NULL || seq != tcp_rsk(req)->snt_isn)) {
-               /* For a Fast Open socket, allow seq to be snt_isn. */
+           !between(seq, snd_una, tp->snd_nxt)) {
                NET_INC_STATS_BH(net, LINUX_MIB_OUTOFWINDOWICMPS);
                goto out;
        }
@@ -426,11 +426,9 @@ void tcp_v4_err(struct sk_buff *icmp_skb, u32 info)
                if (code != ICMP_NET_UNREACH && code != ICMP_HOST_UNREACH)
                        break;
                if (seq != tp->snd_una  || !icsk->icsk_retransmits ||
-                   !icsk->icsk_backoff)
+                   !icsk->icsk_backoff || fastopen)
                        break;
 
-               /* XXX (TFO) - revisit the following logic for TFO */
-
                if (sock_owned_by_user(sk))
                        break;
 
@@ -462,14 +460,6 @@ void tcp_v4_err(struct sk_buff *icmp_skb, u32 info)
                goto out;
        }
 
-       /* XXX (TFO) - if it's a TFO socket and has been accepted, rather
-        * than following the TCP_SYN_RECV case and closing the socket,
-        * we ignore the ICMP error and keep trying like a fully established
-        * socket. Is this the right thing to do?
-        */
-       if (req && req->sk == NULL)
-               goto out;
-
        switch (sk->sk_state) {
                struct request_sock *req, **prev;
        case TCP_LISTEN:
@@ -502,10 +492,13 @@ void tcp_v4_err(struct sk_buff *icmp_skb, u32 info)
                goto out;
 
        case TCP_SYN_SENT:
-       case TCP_SYN_RECV:  /* Cannot happen.
-                              It can f.e. if SYNs crossed,
-                              or Fast Open.
-                            */
+       case TCP_SYN_RECV:
+               /* Only in fast or simultaneous open. If a fast open socket
+                * is already accepted it is treated as a connected one below.
+                */
+               if (fastopen && fastopen->sk == NULL)
+                       break;
+
                if (!sock_owned_by_user(sk)) {
                        sk->sk_err = err;
 
@@ -822,7 +815,8 @@ static void tcp_v4_reqsk_send_ack(struct sock *sk, struct sk_buff *skb,
  */
 static int tcp_v4_send_synack(struct sock *sk, struct dst_entry *dst,
                              struct request_sock *req,
-                             u16 queue_mapping)
+                             u16 queue_mapping,
+                             struct tcp_fastopen_cookie *foc)
 {
        const struct inet_request_sock *ireq = inet_rsk(req);
        struct flowi4 fl4;
@@ -833,7 +827,7 @@ static int tcp_v4_send_synack(struct sock *sk, struct dst_entry *dst,
        if (!dst && (dst = inet_csk_route_req(sk, &fl4, req)) == NULL)
                return -1;
 
-       skb = tcp_make_synack(sk, dst, req, NULL);
+       skb = tcp_make_synack(sk, dst, req, foc);
 
        if (skb) {
                __tcp_v4_send_check(skb, ireq->ir_loc_addr, ireq->ir_rmt_addr);
@@ -852,7 +846,7 @@ static int tcp_v4_send_synack(struct sock *sk, struct dst_entry *dst,
 
 static int tcp_v4_rtx_synack(struct sock *sk, struct request_sock *req)
 {
-       int res = tcp_v4_send_synack(sk, NULL, req, 0);
+       int res = tcp_v4_send_synack(sk, NULL, req, 0, NULL);
 
        if (!res) {
                TCP_INC_STATS_BH(sock_net(sk), TCP_MIB_RETRANSSEGS);
@@ -1260,187 +1254,6 @@ static const struct tcp_request_sock_ops tcp_request_sock_ipv4_ops = {
 };
 #endif
 
-static bool tcp_fastopen_check(struct sock *sk, struct sk_buff *skb,
-                              struct request_sock *req,
-                              struct tcp_fastopen_cookie *foc,
-                              struct tcp_fastopen_cookie *valid_foc)
-{
-       bool skip_cookie = false;
-       struct fastopen_queue *fastopenq;
-
-       if (likely(!fastopen_cookie_present(foc))) {
-               /* See include/net/tcp.h for the meaning of these knobs */
-               if ((sysctl_tcp_fastopen & TFO_SERVER_ALWAYS) ||
-                   ((sysctl_tcp_fastopen & TFO_SERVER_COOKIE_NOT_REQD) &&
-                   (TCP_SKB_CB(skb)->end_seq != TCP_SKB_CB(skb)->seq + 1)))
-                       skip_cookie = true; /* no cookie to validate */
-               else
-                       return false;
-       }
-       fastopenq = inet_csk(sk)->icsk_accept_queue.fastopenq;
-       /* A FO option is present; bump the counter. */
-       NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPFASTOPENPASSIVE);
-
-       /* Make sure the listener has enabled fastopen, and we don't
-        * exceed the max # of pending TFO requests allowed before trying
-        * to validating the cookie in order to avoid burning CPU cycles
-        * unnecessarily.
-        *
-        * XXX (TFO) - The implication of checking the max_qlen before
-        * processing a cookie request is that clients can't differentiate
-        * between qlen overflow causing Fast Open to be disabled
-        * temporarily vs a server not supporting Fast Open at all.
-        */
-       if ((sysctl_tcp_fastopen & TFO_SERVER_ENABLE) == 0 ||
-           fastopenq == NULL || fastopenq->max_qlen == 0)
-               return false;
-
-       if (fastopenq->qlen >= fastopenq->max_qlen) {
-               struct request_sock *req1;
-               spin_lock(&fastopenq->lock);
-               req1 = fastopenq->rskq_rst_head;
-               if ((req1 == NULL) || time_after(req1->expires, jiffies)) {
-                       spin_unlock(&fastopenq->lock);
-                       NET_INC_STATS_BH(sock_net(sk),
-                           LINUX_MIB_TCPFASTOPENLISTENOVERFLOW);
-                       /* Avoid bumping LINUX_MIB_TCPFASTOPENPASSIVEFAIL*/
-                       foc->len = -1;
-                       return false;
-               }
-               fastopenq->rskq_rst_head = req1->dl_next;
-               fastopenq->qlen--;
-               spin_unlock(&fastopenq->lock);
-               reqsk_free(req1);
-       }
-       if (skip_cookie) {
-               tcp_rsk(req)->rcv_nxt = TCP_SKB_CB(skb)->end_seq;
-               return true;
-       }
-
-       if (foc->len == TCP_FASTOPEN_COOKIE_SIZE) {
-               if ((sysctl_tcp_fastopen & TFO_SERVER_COOKIE_NOT_CHKED) == 0) {
-                       tcp_fastopen_cookie_gen(ip_hdr(skb)->saddr,
-                                               ip_hdr(skb)->daddr, valid_foc);
-                       if ((valid_foc->len != TCP_FASTOPEN_COOKIE_SIZE) ||
-                           memcmp(&foc->val[0], &valid_foc->val[0],
-                           TCP_FASTOPEN_COOKIE_SIZE) != 0)
-                               return false;
-                       valid_foc->len = -1;
-               }
-               /* Acknowledge the data received from the peer. */
-               tcp_rsk(req)->rcv_nxt = TCP_SKB_CB(skb)->end_seq;
-               return true;
-       } else if (foc->len == 0) { /* Client requesting a cookie */
-               tcp_fastopen_cookie_gen(ip_hdr(skb)->saddr,
-                                       ip_hdr(skb)->daddr, valid_foc);
-               NET_INC_STATS_BH(sock_net(sk),
-                   LINUX_MIB_TCPFASTOPENCOOKIEREQD);
-       } else {
-               /* Client sent a cookie with wrong size. Treat it
-                * the same as invalid and return a valid one.
-                */
-               tcp_fastopen_cookie_gen(ip_hdr(skb)->saddr,
-                                       ip_hdr(skb)->daddr, valid_foc);
-       }
-       return false;
-}
-
-static int tcp_v4_conn_req_fastopen(struct sock *sk,
-                                   struct sk_buff *skb,
-                                   struct sk_buff *skb_synack,
-                                   struct request_sock *req)
-{
-       struct tcp_sock *tp = tcp_sk(sk);
-       struct request_sock_queue *queue = &inet_csk(sk)->icsk_accept_queue;
-       const struct inet_request_sock *ireq = inet_rsk(req);
-       struct sock *child;
-       int err;
-
-       req->num_retrans = 0;
-       req->num_timeout = 0;
-       req->sk = NULL;
-
-       child = inet_csk(sk)->icsk_af_ops->syn_recv_sock(sk, skb, req, NULL);
-       if (child == NULL) {
-               NET_INC_STATS_BH(sock_net(sk),
-                                LINUX_MIB_TCPFASTOPENPASSIVEFAIL);
-               kfree_skb(skb_synack);
-               return -1;
-       }
-       err = ip_build_and_send_pkt(skb_synack, sk, ireq->ir_loc_addr,
-                                   ireq->ir_rmt_addr, ireq->opt);
-       err = net_xmit_eval(err);
-       if (!err)
-               tcp_rsk(req)->snt_synack = tcp_time_stamp;
-       /* XXX (TFO) - is it ok to ignore error and continue? */
-
-       spin_lock(&queue->fastopenq->lock);
-       queue->fastopenq->qlen++;
-       spin_unlock(&queue->fastopenq->lock);
-
-       /* Initialize the child socket. Have to fix some values to take
-        * into account the child is a Fast Open socket and is created
-        * only out of the bits carried in the SYN packet.
-        */
-       tp = tcp_sk(child);
-
-       tp->fastopen_rsk = req;
-       /* Do a hold on the listner sk so that if the listener is being
-        * closed, the child that has been accepted can live on and still
-        * access listen_lock.
-        */
-       sock_hold(sk);
-       tcp_rsk(req)->listener = sk;
-
-       /* RFC1323: The window in SYN & SYN/ACK segments is never
-        * scaled. So correct it appropriately.
-        */
-       tp->snd_wnd = ntohs(tcp_hdr(skb)->window);
-
-       /* Activate the retrans timer so that SYNACK can be retransmitted.
-        * The request socket is not added to the SYN table of the parent
-        * because it's been added to the accept queue directly.
-        */
-       inet_csk_reset_xmit_timer(child, ICSK_TIME_RETRANS,
-           TCP_TIMEOUT_INIT, TCP_RTO_MAX);
-
-       /* Add the child socket directly into the accept queue */
-       inet_csk_reqsk_queue_add(sk, req, child);
-
-       /* Now finish processing the fastopen child socket. */
-       inet_csk(child)->icsk_af_ops->rebuild_header(child);
-       tcp_init_congestion_control(child);
-       tcp_mtup_init(child);
-       tcp_init_metrics(child);
-       tcp_init_buffer_space(child);
-
-       /* Queue the data carried in the SYN packet. We need to first
-        * bump skb's refcnt because the caller will attempt to free it.
-        *
-        * XXX (TFO) - we honor a zero-payload TFO request for now.
-        * (Any reason not to?)
-        */
-       if (TCP_SKB_CB(skb)->end_seq == TCP_SKB_CB(skb)->seq + 1) {
-               /* Don't queue the skb if there is no payload in SYN.
-                * XXX (TFO) - How about SYN+FIN?
-                */
-               tp->rcv_nxt = TCP_SKB_CB(skb)->end_seq;
-       } else {
-               skb = skb_get(skb);
-               skb_dst_drop(skb);
-               __skb_pull(skb, tcp_hdr(skb)->doff * 4);
-               skb_set_owner_r(skb, child);
-               __skb_queue_tail(&child->sk_receive_queue, skb);
-               tp->rcv_nxt = TCP_SKB_CB(skb)->end_seq;
-               tp->syn_data_acked = 1;
-       }
-       sk->sk_data_ready(sk);
-       bh_unlock_sock(child);
-       sock_put(child);
-       WARN_ON(req->sk == NULL);
-       return 0;
-}
-
 int tcp_v4_conn_request(struct sock *sk, struct sk_buff *skb)
 {
        struct tcp_options_received tmp_opt;
@@ -1451,12 +1264,10 @@ int tcp_v4_conn_request(struct sock *sk, struct sk_buff *skb)
        __be32 saddr = ip_hdr(skb)->saddr;
        __be32 daddr = ip_hdr(skb)->daddr;
        __u32 isn = TCP_SKB_CB(skb)->when;
-       bool want_cookie = false;
+       bool want_cookie = false, fastopen;
        struct flowi4 fl4;
        struct tcp_fastopen_cookie foc = { .len = -1 };
-       struct tcp_fastopen_cookie valid_foc = { .len = -1 };
-       struct sk_buff *skb_synack;
-       int do_fastopen;
+       int err;
 
        /* Never answer to SYNs send to broadcast or multicast */
        if (skb_rtable(skb)->rt_flags & (RTCF_BROADCAST | RTCF_MULTICAST))
@@ -1507,6 +1318,7 @@ int tcp_v4_conn_request(struct sock *sk, struct sk_buff *skb)
        ireq->ir_rmt_addr = saddr;
        ireq->no_srccheck = inet_sk(sk)->transparent;
        ireq->opt = tcp_v4_save_options(skb);
+       ireq->ir_mark = inet_request_mark(sk, skb);
 
        if (security_inet_conn_request(sk, skb, req))
                goto drop_and_free;
@@ -1555,52 +1367,24 @@ int tcp_v4_conn_request(struct sock *sk, struct sk_buff *skb)
 
                isn = tcp_v4_init_sequence(skb);
        }
-       tcp_rsk(req)->snt_isn = isn;
-
-       if (dst == NULL) {
-               dst = inet_csk_route_req(sk, &fl4, req);
-               if (dst == NULL)
-                       goto drop_and_free;
-       }
-       do_fastopen = tcp_fastopen_check(sk, skb, req, &foc, &valid_foc);
-
-       /* We don't call tcp_v4_send_synack() directly because we need
-        * to make sure a child socket can be created successfully before
-        * sending back synack!
-        *
-        * XXX (TFO) - Ideally one would simply call tcp_v4_send_synack()
-        * (or better yet, call tcp_send_synack() in the child context
-        * directly, but will have to fix bunch of other code first)
-        * after syn_recv_sock() except one will need to first fix the
-        * latter to remove its dependency on the current implementation
-        * of tcp_v4_send_synack()->tcp_select_initial_window().
-        */
-       skb_synack = tcp_make_synack(sk, dst, req,
-           fastopen_cookie_present(&valid_foc) ? &valid_foc : NULL);
-
-       if (skb_synack) {
-               __tcp_v4_send_check(skb_synack, ireq->ir_loc_addr, ireq->ir_rmt_addr);
-               skb_set_queue_mapping(skb_synack, skb_get_queue_mapping(skb));
-       } else
+       if (!dst && (dst = inet_csk_route_req(sk, &fl4, req)) == NULL)
                goto drop_and_free;
 
-       if (likely(!do_fastopen)) {
-               int err;
-               err = ip_build_and_send_pkt(skb_synack, sk, ireq->ir_loc_addr,
-                    ireq->ir_rmt_addr, ireq->opt);
-               err = net_xmit_eval(err);
+       tcp_rsk(req)->snt_isn = isn;
+       tcp_rsk(req)->snt_synack = tcp_time_stamp;
+       tcp_openreq_init_rwin(req, sk, dst);
+       fastopen = !want_cookie &&
+                  tcp_try_fastopen(sk, skb, req, &foc, dst);
+       err = tcp_v4_send_synack(sk, dst, req,
+                                skb_get_queue_mapping(skb), &foc);
+       if (!fastopen) {
                if (err || want_cookie)
                        goto drop_and_free;
 
                tcp_rsk(req)->snt_synack = tcp_time_stamp;
                tcp_rsk(req)->listener = NULL;
-               /* Add the request_sock to the SYN table */
                inet_csk_reqsk_queue_hash_add(sk, req, TCP_TIMEOUT_INIT);
-               if (fastopen_cookie_present(&foc) && foc.len != 0)
-                       NET_INC_STATS_BH(sock_net(sk),
-                           LINUX_MIB_TCPFASTOPENPASSIVEFAIL);
-       } else if (tcp_v4_conn_req_fastopen(sk, skb, skb_synack, req))
-               goto drop_and_free;
+       }
 
        return 0;
 
@@ -1744,28 +1528,6 @@ static struct sock *tcp_v4_hnd_req(struct sock *sk, struct sk_buff *skb)
        return sk;
 }
 
-static __sum16 tcp_v4_checksum_init(struct sk_buff *skb)
-{
-       const struct iphdr *iph = ip_hdr(skb);
-
-       if (skb->ip_summed == CHECKSUM_COMPLETE) {
-               if (!tcp_v4_check(skb->len, iph->saddr,
-                                 iph->daddr, skb->csum)) {
-                       skb->ip_summed = CHECKSUM_UNNECESSARY;
-                       return 0;
-               }
-       }
-
-       skb->csum = csum_tcpudp_nofold(iph->saddr, iph->daddr,
-                                      skb->len, IPPROTO_TCP, 0);
-
-       if (skb->len <= 76) {
-               return __skb_checksum_complete(skb);
-       }
-       return 0;
-}
-
-
 /* The socket must have it's spinlock held when we get
  * here.
  *
@@ -1960,7 +1722,8 @@ int tcp_v4_rcv(struct sk_buff *skb)
         * Packet length and doff are validated by header prediction,
         * provided case of th->doff==0 is eliminated.
         * So, we defer the checks. */
-       if (!skb_csum_unnecessary(skb) && tcp_v4_checksum_init(skb))
+
+       if (skb_checksum_init(skb, IPPROTO_TCP, inet_compute_pseudo))
                goto csum_error;
 
        th = tcp_hdr(skb);
index c9aecae313276d134ef56d1385ac44266c200a51..1e70fa8fa793fdbca3ca782231f8b775ed96f205 100644 (file)
@@ -115,13 +115,12 @@ static void tcp_lp_init(struct sock *sk)
  * Will only call newReno CA when away from inference.
  * From TCP-LP's paper, this will be handled in additive increasement.
  */
-static void tcp_lp_cong_avoid(struct sock *sk, u32 ack, u32 acked,
-                             u32 in_flight)
+static void tcp_lp_cong_avoid(struct sock *sk, u32 ack, u32 acked)
 {
        struct lp *lp = inet_csk_ca(sk);
 
        if (!(lp->flag & LP_WITHIN_INF))
-               tcp_reno_cong_avoid(sk, ack, acked, in_flight);
+               tcp_reno_cong_avoid(sk, ack, acked);
 }
 
 /**
index 05c1b155251d39d2e559d050f1e191f51f32b3e0..e68e0d4af6c97bcd0f8c983890ba555adbfb3a00 100644 (file)
@@ -362,6 +362,37 @@ void tcp_twsk_destructor(struct sock *sk)
 }
 EXPORT_SYMBOL_GPL(tcp_twsk_destructor);
 
+void tcp_openreq_init_rwin(struct request_sock *req,
+                          struct sock *sk, struct dst_entry *dst)
+{
+       struct inet_request_sock *ireq = inet_rsk(req);
+       struct tcp_sock *tp = tcp_sk(sk);
+       __u8 rcv_wscale;
+       int mss = dst_metric_advmss(dst);
+
+       if (tp->rx_opt.user_mss && tp->rx_opt.user_mss < mss)
+               mss = tp->rx_opt.user_mss;
+
+       /* Set this up on the first call only */
+       req->window_clamp = tp->window_clamp ? : dst_metric(dst, RTAX_WINDOW);
+
+       /* limit the window selection if the user enforce a smaller rx buffer */
+       if (sk->sk_userlocks & SOCK_RCVBUF_LOCK &&
+           (req->window_clamp > tcp_full_space(sk) || req->window_clamp == 0))
+               req->window_clamp = tcp_full_space(sk);
+
+       /* tcp_full_space because it is guaranteed to be the first packet */
+       tcp_select_initial_window(tcp_full_space(sk),
+               mss - (ireq->tstamp_ok ? TCPOLEN_TSTAMP_ALIGNED : 0),
+               &req->rcv_wnd,
+               &req->window_clamp,
+               ireq->wscale_ok,
+               &rcv_wscale,
+               dst_metric(dst, RTAX_INITRWND));
+       ireq->rcv_wscale = rcv_wscale;
+}
+EXPORT_SYMBOL(tcp_openreq_init_rwin);
+
 static inline void TCP_ECN_openreq_child(struct tcp_sock *tp,
                                         struct request_sock *req)
 {
index 29dde97c3c413cec5ee63696acc1211fb3a59c7e..d463c35db33d8a8873bef9ebf51a9a0ab112fd51 100644 (file)
@@ -627,7 +627,7 @@ static unsigned int tcp_synack_options(struct sock *sk,
                if (unlikely(!ireq->tstamp_ok))
                        remaining -= TCPOLEN_SACKPERM_ALIGNED;
        }
-       if (foc != NULL) {
+       if (foc != NULL && foc->len >= 0) {
                u32 need = TCPOLEN_EXP_FASTOPEN_BASE + foc->len;
                need = (need + 3) & ~3U;  /* Align to 32 bits */
                if (remaining >= need) {
@@ -878,15 +878,8 @@ static int tcp_transmit_skb(struct sock *sk, struct sk_buff *skb, int clone_it,
        BUG_ON(!skb || !tcp_skb_pcount(skb));
 
        if (clone_it) {
-               const struct sk_buff *fclone = skb + 1;
-
                skb_mstamp_get(&skb->skb_mstamp);
 
-               if (unlikely(skb->fclone == SKB_FCLONE_ORIG &&
-                            fclone->fclone == SKB_FCLONE_CLONE))
-                       NET_INC_STATS(sock_net(sk),
-                                     LINUX_MIB_TCPSPURIOUS_RTX_HOSTQUEUES);
-
                if (unlikely(skb_cloned(skb)))
                        skb = pskb_copy(skb, gfp_mask);
                else
@@ -1409,12 +1402,21 @@ static void tcp_cwnd_application_limited(struct sock *sk)
        tp->snd_cwnd_stamp = tcp_time_stamp;
 }
 
-/* Congestion window validation. (RFC2861) */
-static void tcp_cwnd_validate(struct sock *sk)
+static void tcp_cwnd_validate(struct sock *sk, bool is_cwnd_limited)
 {
        struct tcp_sock *tp = tcp_sk(sk);
 
-       if (tp->packets_out >= tp->snd_cwnd) {
+       /* Track the maximum number of outstanding packets in each
+        * window, and remember whether we were cwnd-limited then.
+        */
+       if (!before(tp->snd_una, tp->max_packets_seq) ||
+           tp->packets_out > tp->max_packets_out) {
+               tp->max_packets_out = tp->packets_out;
+               tp->max_packets_seq = tp->snd_nxt;
+               tp->is_cwnd_limited = is_cwnd_limited;
+       }
+
+       if (tcp_is_cwnd_limited(sk)) {
                /* Network is feed fully. */
                tp->snd_cwnd_used = 0;
                tp->snd_cwnd_stamp = tcp_time_stamp;
@@ -1666,7 +1668,8 @@ static int tso_fragment(struct sock *sk, struct sk_buff *skb, unsigned int len,
  *
  * This algorithm is from John Heffner.
  */
-static bool tcp_tso_should_defer(struct sock *sk, struct sk_buff *skb)
+static bool tcp_tso_should_defer(struct sock *sk, struct sk_buff *skb,
+                                bool *is_cwnd_limited)
 {
        struct tcp_sock *tp = tcp_sk(sk);
        const struct inet_connection_sock *icsk = inet_csk(sk);
@@ -1730,6 +1733,9 @@ static bool tcp_tso_should_defer(struct sock *sk, struct sk_buff *skb)
        if (!tp->tso_deferred)
                tp->tso_deferred = 1 | (jiffies << 1);
 
+       if (cong_win < send_win && cong_win < skb->len)
+               *is_cwnd_limited = true;
+
        return true;
 
 send_now:
@@ -1890,6 +1896,7 @@ static bool tcp_write_xmit(struct sock *sk, unsigned int mss_now, int nonagle,
        unsigned int tso_segs, sent_pkts;
        int cwnd_quota;
        int result;
+       bool is_cwnd_limited = false;
 
        sent_pkts = 0;
 
@@ -1914,6 +1921,7 @@ static bool tcp_write_xmit(struct sock *sk, unsigned int mss_now, int nonagle,
 
                cwnd_quota = tcp_cwnd_test(tp, skb);
                if (!cwnd_quota) {
+                       is_cwnd_limited = true;
                        if (push_one == 2)
                                /* Force out a loss probe pkt. */
                                cwnd_quota = 1;
@@ -1930,7 +1938,8 @@ static bool tcp_write_xmit(struct sock *sk, unsigned int mss_now, int nonagle,
                                                      nonagle : TCP_NAGLE_PUSH))))
                                break;
                } else {
-                       if (!push_one && tcp_tso_should_defer(sk, skb))
+                       if (!push_one &&
+                           tcp_tso_should_defer(sk, skb, &is_cwnd_limited))
                                break;
                }
 
@@ -1997,7 +2006,7 @@ repair:
                /* Send one loss probe per tail loss episode. */
                if (push_one != 2)
                        tcp_schedule_loss_probe(sk);
-               tcp_cwnd_validate(sk);
+               tcp_cwnd_validate(sk, is_cwnd_limited);
                return false;
        }
        return (push_one == 2) || (!tp->packets_out && tcp_send_head(sk));
@@ -2061,6 +2070,25 @@ bool tcp_schedule_loss_probe(struct sock *sk)
        return true;
 }
 
+/* Thanks to skb fast clones, we can detect if a prior transmit of
+ * a packet is still in a qdisc or driver queue.
+ * In this case, there is very little point doing a retransmit !
+ * Note: This is called from BH context only.
+ */
+static bool skb_still_in_host_queue(const struct sock *sk,
+                                   const struct sk_buff *skb)
+{
+       const struct sk_buff *fclone = skb + 1;
+
+       if (unlikely(skb->fclone == SKB_FCLONE_ORIG &&
+                    fclone->fclone == SKB_FCLONE_CLONE)) {
+               NET_INC_STATS_BH(sock_net(sk),
+                                LINUX_MIB_TCPSPURIOUS_RTX_HOSTQUEUES);
+               return true;
+       }
+       return false;
+}
+
 /* When probe timeout (PTO) fires, send a new segment if one exists, else
  * retransmit the last segment.
  */
@@ -2086,6 +2114,9 @@ void tcp_send_loss_probe(struct sock *sk)
        if (WARN_ON(!skb))
                goto rearm_timer;
 
+       if (skb_still_in_host_queue(sk, skb))
+               goto rearm_timer;
+
        pcount = tcp_skb_pcount(skb);
        if (WARN_ON(!pcount))
                goto rearm_timer;
@@ -2407,6 +2438,9 @@ int __tcp_retransmit_skb(struct sock *sk, struct sk_buff *skb)
            min(sk->sk_wmem_queued + (sk->sk_wmem_queued >> 2), sk->sk_sndbuf))
                return -EAGAIN;
 
+       if (skb_still_in_host_queue(sk, skb))
+               return -EBUSY;
+
        if (before(TCP_SKB_CB(skb)->seq, tp->snd_una)) {
                if (before(TCP_SKB_CB(skb)->end_seq, tp->snd_una))
                        BUG();
@@ -2463,8 +2497,14 @@ int __tcp_retransmit_skb(struct sock *sk, struct sk_buff *skb)
                err = tcp_transmit_skb(sk, skb, 1, GFP_ATOMIC);
        }
 
-       if (likely(!err))
+       if (likely(!err)) {
                TCP_SKB_CB(skb)->sacked |= TCPCB_EVER_RETRANS;
+               /* Update global TCP statistics. */
+               TCP_INC_STATS(sock_net(sk), TCP_MIB_RETRANSSEGS);
+               if (TCP_SKB_CB(skb)->tcp_flags & TCPHDR_SYN)
+                       NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPSYNRETRANS);
+               tp->total_retrans++;
+       }
        return err;
 }
 
@@ -2474,12 +2514,6 @@ int tcp_retransmit_skb(struct sock *sk, struct sk_buff *skb)
        int err = __tcp_retransmit_skb(sk, skb);
 
        if (err == 0) {
-               /* Update global TCP statistics. */
-               TCP_INC_STATS(sock_net(sk), TCP_MIB_RETRANSSEGS);
-               if (TCP_SKB_CB(skb)->tcp_flags & TCPHDR_SYN)
-                       NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPSYNRETRANS);
-               tp->total_retrans++;
-
 #if FASTRETRANS_DEBUG > 0
                if (TCP_SKB_CB(skb)->sacked & TCPCB_SACKED_RETRANS) {
                        net_dbg_ratelimited("retrans_out leaked\n");
@@ -2500,7 +2534,7 @@ int tcp_retransmit_skb(struct sock *sk, struct sk_buff *skb)
                 * see tcp_input.c tcp_sacktag_write_queue().
                 */
                TCP_SKB_CB(skb)->ack_seq = tp->snd_nxt;
-       } else {
+       } else if (err != -EBUSY) {
                NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPRETRANSFAIL);
        }
        return err;
@@ -2778,27 +2812,6 @@ struct sk_buff *tcp_make_synack(struct sock *sk, struct dst_entry *dst,
        if (tp->rx_opt.user_mss && tp->rx_opt.user_mss < mss)
                mss = tp->rx_opt.user_mss;
 
-       if (req->rcv_wnd == 0) { /* ignored for retransmitted syns */
-               __u8 rcv_wscale;
-               /* Set this up on the first call only */
-               req->window_clamp = tp->window_clamp ? : dst_metric(dst, RTAX_WINDOW);
-
-               /* limit the window selection if the user enforce a smaller rx buffer */
-               if (sk->sk_userlocks & SOCK_RCVBUF_LOCK &&
-                   (req->window_clamp > tcp_full_space(sk) || req->window_clamp == 0))
-                       req->window_clamp = tcp_full_space(sk);
-
-               /* tcp_full_space because it is guaranteed to be the first packet */
-               tcp_select_initial_window(tcp_full_space(sk),
-                       mss - (ireq->tstamp_ok ? TCPOLEN_TSTAMP_ALIGNED : 0),
-                       &req->rcv_wnd,
-                       &req->window_clamp,
-                       ireq->wscale_ok,
-                       &rcv_wscale,
-                       dst_metric(dst, RTAX_INITRWND));
-               ireq->rcv_wscale = rcv_wscale;
-       }
-
        memset(&opts, 0, sizeof(opts));
 #ifdef CONFIG_SYN_COOKIES
        if (unlikely(req->cookie_ts))
index 0ac50836da4d42832f3aa35c9a4cebbf79f69981..8250949b88538db4e0f2d318050518855e74cf3d 100644 (file)
 #define TCP_SCALABLE_AI_CNT    50U
 #define TCP_SCALABLE_MD_SCALE  3
 
-static void tcp_scalable_cong_avoid(struct sock *sk, u32 ack, u32 acked,
-                                   u32 in_flight)
+static void tcp_scalable_cong_avoid(struct sock *sk, u32 ack, u32 acked)
 {
        struct tcp_sock *tp = tcp_sk(sk);
 
-       if (!tcp_is_cwnd_limited(sk, in_flight))
+       if (!tcp_is_cwnd_limited(sk))
                return;
 
        if (tp->snd_cwnd <= tp->snd_ssthresh)
index 48539fff6357a4e778c537b99bb9a7fd49eb43b3..9a5e05f27f4f7cce16cb285698cb0fc73ef49a91 100644 (file)
@@ -163,14 +163,13 @@ static inline u32 tcp_vegas_ssthresh(struct tcp_sock *tp)
        return  min(tp->snd_ssthresh, tp->snd_cwnd-1);
 }
 
-static void tcp_vegas_cong_avoid(struct sock *sk, u32 ack, u32 acked,
-                                u32 in_flight)
+static void tcp_vegas_cong_avoid(struct sock *sk, u32 ack, u32 acked)
 {
        struct tcp_sock *tp = tcp_sk(sk);
        struct vegas *vegas = inet_csk_ca(sk);
 
        if (!vegas->doing_vegas_now) {
-               tcp_reno_cong_avoid(sk, ack, acked, in_flight);
+               tcp_reno_cong_avoid(sk, ack, acked);
                return;
        }
 
@@ -195,7 +194,7 @@ static void tcp_vegas_cong_avoid(struct sock *sk, u32 ack, u32 acked,
                        /* We don't have enough RTT samples to do the Vegas
                         * calculation, so we'll behave like Reno.
                         */
-                       tcp_reno_cong_avoid(sk, ack, acked, in_flight);
+                       tcp_reno_cong_avoid(sk, ack, acked);
                } else {
                        u32 rtt, diff;
                        u64 target_cwnd;
index 1b8e28fcd7e1cab3edd586db1b716742a4402fd7..27b9825753d15d89717d7b76e1c38fae4758cfbc 100644 (file)
@@ -114,19 +114,18 @@ static void tcp_veno_cwnd_event(struct sock *sk, enum tcp_ca_event event)
                tcp_veno_init(sk);
 }
 
-static void tcp_veno_cong_avoid(struct sock *sk, u32 ack, u32 acked,
-                               u32 in_flight)
+static void tcp_veno_cong_avoid(struct sock *sk, u32 ack, u32 acked)
 {
        struct tcp_sock *tp = tcp_sk(sk);
        struct veno *veno = inet_csk_ca(sk);
 
        if (!veno->doing_veno_now) {
-               tcp_reno_cong_avoid(sk, ack, acked, in_flight);
+               tcp_reno_cong_avoid(sk, ack, acked);
                return;
        }
 
        /* limited by applications */
-       if (!tcp_is_cwnd_limited(sk, in_flight))
+       if (!tcp_is_cwnd_limited(sk))
                return;
 
        /* We do the Veno calculations only if we got enough rtt samples */
@@ -134,7 +133,7 @@ static void tcp_veno_cong_avoid(struct sock *sk, u32 ack, u32 acked,
                /* We don't have enough rtt samples to do the Veno
                 * calculation, so we'll behave like Reno.
                 */
-               tcp_reno_cong_avoid(sk, ack, acked, in_flight);
+               tcp_reno_cong_avoid(sk, ack, acked);
        } else {
                u64 target_cwnd;
                u32 rtt;
index 5ede0e727945add71904a2d3c57d334e77e94baf..599b79b8eac07298a34cff43ae87e546bdb36847 100644 (file)
@@ -69,13 +69,12 @@ static void tcp_yeah_pkts_acked(struct sock *sk, u32 pkts_acked, s32 rtt_us)
        tcp_vegas_pkts_acked(sk, pkts_acked, rtt_us);
 }
 
-static void tcp_yeah_cong_avoid(struct sock *sk, u32 ack, u32 acked,
-                               u32 in_flight)
+static void tcp_yeah_cong_avoid(struct sock *sk, u32 ack, u32 acked)
 {
        struct tcp_sock *tp = tcp_sk(sk);
        struct yeah *yeah = inet_csk_ca(sk);
 
-       if (!tcp_is_cwnd_limited(sk, in_flight))
+       if (!tcp_is_cwnd_limited(sk))
                return;
 
        if (tp->snd_cwnd <= tp->snd_ssthresh)
index 4468e1adc094a1f6f12eb20cf7c79a9b9187826d..590532a7bd2d39c582f95d47abba30e45cdbddfe 100644 (file)
@@ -246,7 +246,7 @@ int udp_lib_get_port(struct sock *sk, unsigned short snum,
                        do {
                                if (low <= snum && snum <= high &&
                                    !test_bit(snum >> udptable->log, bitmap) &&
-                                   !inet_is_reserved_local_port(snum))
+                                   !inet_is_local_reserved_port(net, snum))
                                        goto found;
                                snum += rand;
                        } while (snum != first);
@@ -1495,6 +1495,10 @@ int udp_queue_rcv_skb(struct sock *sk, struct sk_buff *skb)
                if (skb->len > sizeof(struct udphdr) && encap_rcv != NULL) {
                        int ret;
 
+                       /* Verify checksum before giving to encap */
+                       if (udp_lib_checksum_complete(skb))
+                               goto csum_error;
+
                        ret = encap_rcv(sk, skb);
                        if (ret <= 0) {
                                UDP_INC_STATS_BH(sock_net(sk),
@@ -1672,7 +1676,6 @@ static int __udp4_lib_mcast_deliver(struct net *net, struct sk_buff *skb,
 static inline int udp4_csum_init(struct sk_buff *skb, struct udphdr *uh,
                                 int proto)
 {
-       const struct iphdr *iph;
        int err;
 
        UDP_SKB_CB(skb)->partial_cov = 0;
@@ -1684,22 +1687,8 @@ static inline int udp4_csum_init(struct sk_buff *skb, struct udphdr *uh,
                        return err;
        }
 
-       iph = ip_hdr(skb);
-       if (uh->check == 0) {
-               skb->ip_summed = CHECKSUM_UNNECESSARY;
-       } else if (skb->ip_summed == CHECKSUM_COMPLETE) {
-               if (!csum_tcpudp_magic(iph->saddr, iph->daddr, skb->len,
-                                     proto, skb->csum))
-                       skb->ip_summed = CHECKSUM_UNNECESSARY;
-       }
-       if (!skb_csum_unnecessary(skb))
-               skb->csum = csum_tcpudp_nofold(iph->saddr, iph->daddr,
-                                              skb->len, proto, 0);
-       /* Probably, we should checksum udp header (it should be in cache
-        * in any case) and data in tiny packets (< rx copybreak).
-        */
-
-       return 0;
+       return skb_checksum_init_zero_check(skb, proto, uh->check,
+                                           inet_compute_pseudo);
 }
 
 /*
@@ -1886,7 +1875,7 @@ static struct sock *__udp4_lib_demux_lookup(struct net *net,
        unsigned int hash2 = udp4_portaddr_hash(net, loc_addr, hnum);
        unsigned int slot2 = hash2 & udp_table.mask;
        struct udp_hslot *hslot2 = &udp_table.hash2[slot2];
-       INET_ADDR_COOKIE(acookie, rmt_addr, loc_addr)
+       INET_ADDR_COOKIE(acookie, rmt_addr, loc_addr);
        const __portpair ports = INET_COMBINED_PORTS(rmt_port, hnum);
 
        rcu_read_lock();
index 40e701f2e1e0324af6f0af781ac6715866ad88d3..8e8c018d9d2d142fcfef2f3bdc1c486e363e10c0 100644 (file)
@@ -25,7 +25,7 @@ static int xfrm4_tunnel_check_size(struct sk_buff *skb)
        if (IPCB(skb)->flags & IPSKB_XFRM_TUNNEL_SIZE)
                goto out;
 
-       if (!(ip_hdr(skb)->frag_off & htons(IP_DF)) || skb->local_df)
+       if (!(ip_hdr(skb)->frag_off & htons(IP_DF)) || skb->ignore_df)
                goto out;
 
        mtu = dst_mtu(skb_dst(skb));
index 6c7fa0853fc74ef179b00de52d78aecee342e18b..5667b3003af9b51779ff322717e999282113c4b7 100644 (file)
@@ -275,19 +275,14 @@ static int snmp6_alloc_dev(struct inet6_dev *idev)
 {
        int i;
 
-       if (snmp_mib_init((void __percpu **)idev->stats.ipv6,
-                         sizeof(struct ipstats_mib),
-                         __alignof__(struct ipstats_mib)) < 0)
+       idev->stats.ipv6 = alloc_percpu(struct ipstats_mib);
+       if (!idev->stats.ipv6)
                goto err_ip;
 
        for_each_possible_cpu(i) {
                struct ipstats_mib *addrconf_stats;
-               addrconf_stats = per_cpu_ptr(idev->stats.ipv6[0], i);
+               addrconf_stats = per_cpu_ptr(idev->stats.ipv6, i);
                u64_stats_init(&addrconf_stats->syncp);
-#if SNMP_ARRAY_SZ == 2
-               addrconf_stats = per_cpu_ptr(idev->stats.ipv6[1], i);
-               u64_stats_init(&addrconf_stats->syncp);
-#endif
        }
 
 
@@ -305,7 +300,7 @@ static int snmp6_alloc_dev(struct inet6_dev *idev)
 err_icmpmsg:
        kfree(idev->stats.icmpv6dev);
 err_icmp:
-       snmp_mib_free((void __percpu **)idev->stats.ipv6);
+       free_percpu(idev->stats.ipv6);
 err_ip:
        return -ENOMEM;
 }
@@ -2504,8 +2499,8 @@ static int inet6_addr_add(struct net *net, int ifindex,
        return PTR_ERR(ifp);
 }
 
-static int inet6_addr_del(struct net *net, int ifindex, const struct in6_addr *pfx,
-                         unsigned int plen)
+static int inet6_addr_del(struct net *net, int ifindex, u32 ifa_flags,
+                         const struct in6_addr *pfx, unsigned int plen)
 {
        struct inet6_ifaddr *ifp;
        struct inet6_dev *idev;
@@ -2528,7 +2523,12 @@ static int inet6_addr_del(struct net *net, int ifindex, const struct in6_addr *p
                        in6_ifa_hold(ifp);
                        read_unlock_bh(&idev->lock);
 
+                       if (!(ifp->flags & IFA_F_TEMPORARY) &&
+                           (ifa_flags & IFA_F_MANAGETEMPADDR))
+                               manage_tempaddrs(idev, ifp, 0, 0, false,
+                                                jiffies);
                        ipv6_del_addr(ifp);
+                       addrconf_verify_rtnl();
                        return 0;
                }
        }
@@ -2568,7 +2568,7 @@ int addrconf_del_ifaddr(struct net *net, void __user *arg)
                return -EFAULT;
 
        rtnl_lock();
-       err = inet6_addr_del(net, ireq.ifr6_ifindex, &ireq.ifr6_addr,
+       err = inet6_addr_del(net, ireq.ifr6_ifindex, 0, &ireq.ifr6_addr,
                             ireq.ifr6_prefixlen);
        rtnl_unlock();
        return err;
@@ -2813,18 +2813,6 @@ static void addrconf_gre_config(struct net_device *dev)
 }
 #endif
 
-static inline int
-ipv6_inherit_linklocal(struct inet6_dev *idev, struct net_device *link_dev)
-{
-       struct in6_addr lladdr;
-
-       if (!ipv6_get_lladdr(link_dev, &lladdr, IFA_F_TENTATIVE)) {
-               addrconf_add_linklocal(idev, &lladdr);
-               return 0;
-       }
-       return -1;
-}
-
 static int addrconf_notify(struct notifier_block *this, unsigned long event,
                           void *ptr)
 {
@@ -3743,6 +3731,7 @@ inet6_rtm_deladdr(struct sk_buff *skb, struct nlmsghdr *nlh)
        struct ifaddrmsg *ifm;
        struct nlattr *tb[IFA_MAX+1];
        struct in6_addr *pfx, *peer_pfx;
+       u32 ifa_flags;
        int err;
 
        err = nlmsg_parse(nlh, sizeof(*ifm), tb, IFA_MAX, ifa_ipv6_policy);
@@ -3754,7 +3743,13 @@ inet6_rtm_deladdr(struct sk_buff *skb, struct nlmsghdr *nlh)
        if (pfx == NULL)
                return -EINVAL;
 
-       return inet6_addr_del(net, ifm->ifa_index, pfx, ifm->ifa_prefixlen);
+       ifa_flags = tb[IFA_FLAGS] ? nla_get_u32(tb[IFA_FLAGS]) : ifm->ifa_flags;
+
+       /* We ignore other flags so far. */
+       ifa_flags &= IFA_F_MANAGETEMPADDR;
+
+       return inet6_addr_del(net, ifm->ifa_index, ifa_flags, pfx,
+                             ifm->ifa_prefixlen);
 }
 
 static int inet6_addr_modify(struct inet6_ifaddr *ifp, u32 ifa_flags,
@@ -4363,7 +4358,7 @@ static inline void __snmp6_fill_statsdev(u64 *stats, atomic_long_t *mib,
        memset(&stats[items], 0, pad);
 }
 
-static inline void __snmp6_fill_stats64(u64 *stats, void __percpu **mib,
+static inline void __snmp6_fill_stats64(u64 *stats, void __percpu *mib,
                                      int items, int bytes, size_t syncpoff)
 {
        int i;
@@ -4383,7 +4378,7 @@ static void snmp6_fill_stats(u64 *stats, struct inet6_dev *idev, int attrtype,
 {
        switch (attrtype) {
        case IFLA_INET6_STATS:
-               __snmp6_fill_stats64(stats, (void __percpu **)idev->stats.ipv6,
+               __snmp6_fill_stats64(stats, idev->stats.ipv6,
                                     IPSTATS_MIB_MAX, bytes, offsetof(struct ipstats_mib, syncp));
                break;
        case IFLA_INET6_ICMP6STATS:
index 4c11cbcf83089152052b60072dbe11d147d10803..e6960457f62582c4ca41c674cc531d64ca76b038 100644 (file)
@@ -123,7 +123,7 @@ static void snmp6_free_dev(struct inet6_dev *idev)
 {
        kfree(idev->stats.icmpv6msgdev);
        kfree(idev->stats.icmpv6dev);
-       snmp_mib_free((void __percpu **)idev->stats.ipv6);
+       free_percpu(idev->stats.ipv6);
 }
 
 /* Nobody refers to this device, we may destroy it. */
index d935889f1008ae93ff1efe52badeedf41352f257..dc47cc757b80b6df3fbe99f7d2ead9a335689126 100644 (file)
@@ -715,33 +715,25 @@ static int __net_init ipv6_init_mibs(struct net *net)
 {
        int i;
 
-       if (snmp_mib_init((void __percpu **)net->mib.udp_stats_in6,
-                         sizeof(struct udp_mib),
-                         __alignof__(struct udp_mib)) < 0)
+       net->mib.udp_stats_in6 = alloc_percpu(struct udp_mib);
+       if (!net->mib.udp_stats_in6)
                return -ENOMEM;
-       if (snmp_mib_init((void __percpu **)net->mib.udplite_stats_in6,
-                         sizeof(struct udp_mib),
-                         __alignof__(struct udp_mib)) < 0)
+       net->mib.udplite_stats_in6 = alloc_percpu(struct udp_mib);
+       if (!net->mib.udplite_stats_in6)
                goto err_udplite_mib;
-       if (snmp_mib_init((void __percpu **)net->mib.ipv6_statistics,
-                         sizeof(struct ipstats_mib),
-                         __alignof__(struct ipstats_mib)) < 0)
+       net->mib.ipv6_statistics = alloc_percpu(struct ipstats_mib);
+       if (!net->mib.ipv6_statistics)
                goto err_ip_mib;
 
        for_each_possible_cpu(i) {
                struct ipstats_mib *af_inet6_stats;
-               af_inet6_stats = per_cpu_ptr(net->mib.ipv6_statistics[0], i);
+               af_inet6_stats = per_cpu_ptr(net->mib.ipv6_statistics, i);
                u64_stats_init(&af_inet6_stats->syncp);
-#if SNMP_ARRAY_SZ == 2
-               af_inet6_stats = per_cpu_ptr(net->mib.ipv6_statistics[1], i);
-               u64_stats_init(&af_inet6_stats->syncp);
-#endif
        }
 
 
-       if (snmp_mib_init((void __percpu **)net->mib.icmpv6_statistics,
-                         sizeof(struct icmpv6_mib),
-                         __alignof__(struct icmpv6_mib)) < 0)
+       net->mib.icmpv6_statistics = alloc_percpu(struct icmpv6_mib);
+       if (!net->mib.icmpv6_statistics)
                goto err_icmp_mib;
        net->mib.icmpv6msg_statistics = kzalloc(sizeof(struct icmpv6msg_mib),
                                                GFP_KERNEL);
@@ -750,22 +742,22 @@ static int __net_init ipv6_init_mibs(struct net *net)
        return 0;
 
 err_icmpmsg_mib:
-       snmp_mib_free((void __percpu **)net->mib.icmpv6_statistics);
+       free_percpu(net->mib.icmpv6_statistics);
 err_icmp_mib:
-       snmp_mib_free((void __percpu **)net->mib.ipv6_statistics);
+       free_percpu(net->mib.ipv6_statistics);
 err_ip_mib:
-       snmp_mib_free((void __percpu **)net->mib.udplite_stats_in6);
+       free_percpu(net->mib.udplite_stats_in6);
 err_udplite_mib:
-       snmp_mib_free((void __percpu **)net->mib.udp_stats_in6);
+       free_percpu(net->mib.udp_stats_in6);
        return -ENOMEM;
 }
 
 static void ipv6_cleanup_mibs(struct net *net)
 {
-       snmp_mib_free((void __percpu **)net->mib.udp_stats_in6);
-       snmp_mib_free((void __percpu **)net->mib.udplite_stats_in6);
-       snmp_mib_free((void __percpu **)net->mib.ipv6_statistics);
-       snmp_mib_free((void __percpu **)net->mib.icmpv6_statistics);
+       free_percpu(net->mib.udp_stats_in6);
+       free_percpu(net->mib.udplite_stats_in6);
+       free_percpu(net->mib.ipv6_statistics);
+       free_percpu(net->mib.icmpv6_statistics);
        kfree(net->mib.icmpv6msg_statistics);
 }
 
index 7b326529e6a2cba57695697cfff436357c347b2c..f6c84a6eb2389c55f4abd55fefbd073c73743a2a 100644 (file)
@@ -400,6 +400,7 @@ static void icmp6_send(struct sk_buff *skb, u8 type, u8 code, __u32 info)
        int len;
        int hlimit;
        int err = 0;
+       u32 mark = IP6_REPLY_MARK(net, skb->mark);
 
        if ((u8 *)hdr < skb->head ||
            (skb_network_header(skb) + sizeof(*hdr)) > skb_tail_pointer(skb))
@@ -466,6 +467,7 @@ static void icmp6_send(struct sk_buff *skb, u8 type, u8 code, __u32 info)
        fl6.daddr = hdr->saddr;
        if (saddr)
                fl6.saddr = *saddr;
+       fl6.flowi6_mark = mark;
        fl6.flowi6_oif = iif;
        fl6.fl6_icmp_type = type;
        fl6.fl6_icmp_code = code;
@@ -474,6 +476,7 @@ static void icmp6_send(struct sk_buff *skb, u8 type, u8 code, __u32 info)
        sk = icmpv6_xmit_lock(net);
        if (sk == NULL)
                return;
+       sk->sk_mark = mark;
        np = inet6_sk(sk);
 
        if (!icmpv6_xrlim_allow(sk, type, &fl6))
@@ -493,12 +496,7 @@ static void icmp6_send(struct sk_buff *skb, u8 type, u8 code, __u32 info)
        if (IS_ERR(dst))
                goto out;
 
-       if (ipv6_addr_is_multicast(&fl6.daddr))
-               hlimit = np->mcast_hops;
-       else
-               hlimit = np->hop_limit;
-       if (hlimit < 0)
-               hlimit = ip6_dst_hoplimit(dst);
+       hlimit = ip6_sk_dst_hoplimit(np, &fl6, dst);
 
        msg.skb = skb;
        msg.offset = skb_network_offset(skb);
@@ -556,6 +554,7 @@ static void icmpv6_echo_reply(struct sk_buff *skb)
        int err = 0;
        int hlimit;
        u8 tclass;
+       u32 mark = IP6_REPLY_MARK(net, skb->mark);
 
        saddr = &ipv6_hdr(skb)->daddr;
 
@@ -574,11 +573,13 @@ static void icmpv6_echo_reply(struct sk_buff *skb)
                fl6.saddr = *saddr;
        fl6.flowi6_oif = skb->dev->ifindex;
        fl6.fl6_icmp_type = ICMPV6_ECHO_REPLY;
+       fl6.flowi6_mark = mark;
        security_skb_classify_flow(skb, flowi6_to_flowi(&fl6));
 
        sk = icmpv6_xmit_lock(net);
        if (sk == NULL)
                return;
+       sk->sk_mark = mark;
        np = inet6_sk(sk);
 
        if (!fl6.flowi6_oif && ipv6_addr_is_multicast(&fl6.daddr))
@@ -593,12 +594,7 @@ static void icmpv6_echo_reply(struct sk_buff *skb)
        if (IS_ERR(dst))
                goto out;
 
-       if (ipv6_addr_is_multicast(&fl6.daddr))
-               hlimit = np->mcast_hops;
-       else
-               hlimit = np->hop_limit;
-       if (hlimit < 0)
-               hlimit = ip6_dst_hoplimit(dst);
+       hlimit = ip6_sk_dst_hoplimit(np, &fl6, dst);
 
        idev = __in6_dev_get(skb->dev);
 
@@ -702,22 +698,11 @@ static int icmpv6_rcv(struct sk_buff *skb)
        saddr = &ipv6_hdr(skb)->saddr;
        daddr = &ipv6_hdr(skb)->daddr;
 
-       /* Perform checksum. */
-       switch (skb->ip_summed) {
-       case CHECKSUM_COMPLETE:
-               if (!csum_ipv6_magic(saddr, daddr, skb->len, IPPROTO_ICMPV6,
-                                    skb->csum))
-                       break;
-               /* fall through */
-       case CHECKSUM_NONE:
-               skb->csum = ~csum_unfold(csum_ipv6_magic(saddr, daddr, skb->len,
-                                            IPPROTO_ICMPV6, 0));
-               if (__skb_checksum_complete(skb)) {
-                       LIMIT_NETDEBUG(KERN_DEBUG
-                                      "ICMPv6 checksum failed [%pI6c > %pI6c]\n",
-                                      saddr, daddr);
-                       goto csum_error;
-               }
+       if (skb_checksum_validate(skb, IPPROTO_ICMPV6, ip6_compute_pseudo)) {
+               LIMIT_NETDEBUG(KERN_DEBUG
+                              "ICMPv6 checksum failed [%pI6c > %pI6c]\n",
+                              saddr, daddr);
+               goto csum_error;
        }
 
        if (!pskb_pull(skb, sizeof(*hdr)))
index d4ade34ab37566d8cca9e164f5fde5fb5a762fe6..a245e5ddffbd0450968c44de7d3fcd8a1dd055cf 100644 (file)
@@ -81,7 +81,7 @@ struct dst_entry *inet6_csk_route_req(struct sock *sk,
        final_p = fl6_update_dst(fl6, np->opt, &final);
        fl6->saddr = ireq->ir_v6_loc_addr;
        fl6->flowi6_oif = ireq->ir_iif;
-       fl6->flowi6_mark = sk->sk_mark;
+       fl6->flowi6_mark = ireq->ir_mark;
        fl6->fl6_dport = ireq->ir_rmt_port;
        fl6->fl6_sport = htons(ireq->ir_num);
        security_req_classify_flow(req, flowi6_to_flowi(fl6));
index ee7a97f510cbd9f94fa24eafa43ddba201c75f3e..da26224a599323250303d0c07d42254b536b5d82 100644 (file)
@@ -75,25 +75,12 @@ int udp6_csum_init(struct sk_buff *skb, struct udphdr *uh, int proto)
                        return err;
        }
 
-       if (uh->check == 0) {
-               /* RFC 2460 section 8.1 says that we SHOULD log
-                  this error. Well, it is reasonable.
-                */
-               LIMIT_NETDEBUG(KERN_INFO "IPv6: udp checksum is 0 for [%pI6c]:%u->[%pI6c]:%u\n",
-                              &ipv6_hdr(skb)->saddr, ntohs(uh->source),
-                              &ipv6_hdr(skb)->daddr, ntohs(uh->dest));
-               return 1;
-       }
-       if (skb->ip_summed == CHECKSUM_COMPLETE &&
-           !csum_ipv6_magic(&ipv6_hdr(skb)->saddr, &ipv6_hdr(skb)->daddr,
-                            skb->len, proto, skb->csum))
-               skb->ip_summed = CHECKSUM_UNNECESSARY;
-
-       if (!skb_csum_unnecessary(skb))
-               skb->csum = ~csum_unfold(csum_ipv6_magic(&ipv6_hdr(skb)->saddr,
-                                                        &ipv6_hdr(skb)->daddr,
-                                                        skb->len, proto, 0));
-
-       return 0;
+       /* To support RFC 6936 (allow zero checksum in UDP/IPV6 for tunnels)
+        * we accept a checksum of zero here. When we find the socket
+        * for the UDP packet we'll check if that socket allows zero checksum
+        * for IPv6 (set by socket option).
+        */
+       return skb_checksum_init_zero_check(skb, proto, uh->check,
+                                          ip6_compute_pseudo);
 }
 EXPORT_SYMBOL(udp6_csum_init);
index 34e0ded5c14b028ebbb1bb03c1b30e8f25f98811..cb4459bd1d294d1901cc03b9c203fbcbf761f53f 100644 (file)
@@ -71,8 +71,7 @@ static DEFINE_RWLOCK(fib6_walker_lock);
 #define FWS_INIT FWS_L
 #endif
 
-static void fib6_prune_clones(struct net *net, struct fib6_node *fn,
-                             struct rt6_info *rt);
+static void fib6_prune_clones(struct net *net, struct fib6_node *fn);
 static struct rt6_info *fib6_find_prefix(struct net *net, struct fib6_node *fn);
 static struct fib6_node *fib6_repair_tree(struct net *net, struct fib6_node *fn);
 static int fib6_walk(struct fib6_walker_t *w);
@@ -941,7 +940,7 @@ int fib6_add(struct fib6_node *root, struct rt6_info *rt, struct nl_info *info,
        if (!err) {
                fib6_start_gc(info->nl_net, rt);
                if (!(rt->rt6i_flags & RTF_CACHE))
-                       fib6_prune_clones(info->nl_net, pn, rt);
+                       fib6_prune_clones(info->nl_net, pn);
        }
 
 out:
@@ -1375,7 +1374,7 @@ int fib6_del(struct rt6_info *rt, struct nl_info *info)
                        pn = pn->parent;
                }
 #endif
-               fib6_prune_clones(info->nl_net, pn, rt);
+               fib6_prune_clones(info->nl_net, pn);
        }
 
        /*
@@ -1459,7 +1458,7 @@ static int fib6_walk_continue(struct fib6_walker_t *w)
 
                                if (w->skip) {
                                        w->skip--;
-                                       continue;
+                                       goto skip;
                                }
 
                                err = w->func(w);
@@ -1469,6 +1468,7 @@ static int fib6_walk_continue(struct fib6_walker_t *w)
                                w->count++;
                                continue;
                        }
+skip:
                        w->state = FWS_U;
                case FWS_U:
                        if (fn == w->root)
@@ -1600,10 +1600,9 @@ static int fib6_prune_clone(struct rt6_info *rt, void *arg)
        return 0;
 }
 
-static void fib6_prune_clones(struct net *net, struct fib6_node *fn,
-                             struct rt6_info *rt)
+static void fib6_prune_clones(struct net *net, struct fib6_node *fn)
 {
-       fib6_clean_tree(net, fn, fib6_prune_clone, 1, rt);
+       fib6_clean_tree(net, fn, fib6_prune_clone, 1, NULL);
 }
 
 /*
index 0961b5ef866d04803cf91243aec32bb9e2ea8cf7..4052694c6f2cb196b54ef7d5235d61d74e4ea69c 100644 (file)
@@ -26,7 +26,6 @@
 #include <net/sock.h>
 
 #include <net/ipv6.h>
-#include <net/addrconf.h>
 #include <net/rawv6.h>
 #include <net/transp_v6.h>
 
index 9d921462b57f293f9f49f6ec78936c84a3f756a0..3873181ed85614a28f9857d7d53acf2be9d2b9fb 100644 (file)
@@ -72,6 +72,7 @@ struct ip6gre_net {
 };
 
 static struct rtnl_link_ops ip6gre_link_ops __read_mostly;
+static struct rtnl_link_ops ip6gre_tap_ops __read_mostly;
 static int ip6gre_tunnel_init(struct net_device *dev);
 static void ip6gre_tunnel_setup(struct net_device *dev);
 static void ip6gre_tunnel_link(struct ip6gre_net *ign, struct ip6_tnl *t);
@@ -353,10 +354,10 @@ failed_free:
 
 static void ip6gre_tunnel_uninit(struct net_device *dev)
 {
-       struct net *net = dev_net(dev);
-       struct ip6gre_net *ign = net_generic(net, ip6gre_net_id);
+       struct ip6_tnl *t = netdev_priv(dev);
+       struct ip6gre_net *ign = net_generic(t->net, ip6gre_net_id);
 
-       ip6gre_tunnel_unlink(ign, netdev_priv(dev));
+       ip6gre_tunnel_unlink(ign, t);
        dev_put(dev);
 }
 
@@ -467,17 +468,7 @@ static int ip6gre_rcv(struct sk_buff *skb)
                        goto drop;
 
                if (flags&GRE_CSUM) {
-                       switch (skb->ip_summed) {
-                       case CHECKSUM_COMPLETE:
-                               csum = csum_fold(skb->csum);
-                               if (!csum)
-                                       break;
-                               /* fall through */
-                       case CHECKSUM_NONE:
-                               skb->csum = 0;
-                               csum = __skb_checksum_complete(skb);
-                               skb->ip_summed = CHECKSUM_COMPLETE;
-                       }
+                       csum = skb_checksum_simple_validate(skb);
                        offset += 4;
                }
                if (flags&GRE_KEY) {
@@ -611,8 +602,8 @@ static netdev_tx_t ip6gre_xmit2(struct sk_buff *skb,
                         int encap_limit,
                         __u32 *pmtu)
 {
-       struct net *net = dev_net(dev);
        struct ip6_tnl *tunnel = netdev_priv(dev);
+       struct net *net = tunnel->net;
        struct net_device *tdev;    /* Device to other host */
        struct ipv6hdr  *ipv6h;     /* Our new IP header */
        unsigned int max_headroom = 0; /* The extra header space needed */
@@ -979,7 +970,7 @@ static void ip6gre_tnl_link_config(struct ip6_tnl *t, int set_mtu)
                int strict = (ipv6_addr_type(&p->raddr) &
                              (IPV6_ADDR_MULTICAST|IPV6_ADDR_LINKLOCAL));
 
-               struct rt6_info *rt = rt6_lookup(dev_net(dev),
+               struct rt6_info *rt = rt6_lookup(t->net,
                                                 &p->raddr, &p->laddr,
                                                 p->link, strict);
 
@@ -1063,13 +1054,12 @@ static int ip6gre_tunnel_ioctl(struct net_device *dev,
        int err = 0;
        struct ip6_tnl_parm2 p;
        struct __ip6_tnl_parm p1;
-       struct ip6_tnl *t;
-       struct net *net = dev_net(dev);
+       struct ip6_tnl *t = netdev_priv(dev);
+       struct net *net = t->net;
        struct ip6gre_net *ign = net_generic(net, ip6gre_net_id);
 
        switch (cmd) {
        case SIOCGETTUNNEL:
-               t = NULL;
                if (dev == ign->fb_tunnel_dev) {
                        if (copy_from_user(&p, ifr->ifr_ifru.ifru_data, sizeof(p))) {
                                err = -EFAULT;
@@ -1077,9 +1067,9 @@ static int ip6gre_tunnel_ioctl(struct net_device *dev,
                        }
                        ip6gre_tnl_parm_from_user(&p1, &p);
                        t = ip6gre_tunnel_locate(net, &p1, 0);
+                       if (t == NULL)
+                               t = netdev_priv(dev);
                }
-               if (t == NULL)
-                       t = netdev_priv(dev);
                memset(&p, 0, sizeof(p));
                ip6gre_tnl_parm_to_user(&p, &t->parms);
                if (copy_to_user(ifr->ifr_ifru.ifru_data, &p, sizeof(p)))
@@ -1242,7 +1232,6 @@ static void ip6gre_tunnel_setup(struct net_device *dev)
        dev->flags |= IFF_NOARP;
        dev->iflink = 0;
        dev->addr_len = sizeof(struct in6_addr);
-       dev->features |= NETIF_F_NETNS_LOCAL;
        dev->priv_flags &= ~IFF_XMIT_DST_RELEASE;
 }
 
@@ -1297,11 +1286,17 @@ static struct inet6_protocol ip6gre_protocol __read_mostly = {
        .flags       = INET6_PROTO_NOPOLICY|INET6_PROTO_FINAL,
 };
 
-static void ip6gre_destroy_tunnels(struct ip6gre_net *ign,
-       struct list_head *head)
+static void ip6gre_destroy_tunnels(struct net *net, struct list_head *head)
 {
+       struct ip6gre_net *ign = net_generic(net, ip6gre_net_id);
+       struct net_device *dev, *aux;
        int prio;
 
+       for_each_netdev_safe(net, dev, aux)
+               if (dev->rtnl_link_ops == &ip6gre_link_ops ||
+                   dev->rtnl_link_ops == &ip6gre_tap_ops)
+                       unregister_netdevice_queue(dev, head);
+
        for (prio = 0; prio < 4; prio++) {
                int h;
                for (h = 0; h < HASH_SIZE; h++) {
@@ -1310,7 +1305,12 @@ static void ip6gre_destroy_tunnels(struct ip6gre_net *ign,
                        t = rtnl_dereference(ign->tunnels[prio][h]);
 
                        while (t != NULL) {
-                               unregister_netdevice_queue(t->dev, head);
+                               /* If dev is in the same netns, it has already
+                                * been added to the list by the previous loop.
+                                */
+                               if (!net_eq(dev_net(t->dev), net))
+                                       unregister_netdevice_queue(t->dev,
+                                                                  head);
                                t = rtnl_dereference(t->next);
                        }
                }
@@ -1329,6 +1329,11 @@ static int __net_init ip6gre_init_net(struct net *net)
                goto err_alloc_dev;
        }
        dev_net_set(ign->fb_tunnel_dev, net);
+       /* FB netdevice is special: we have one, and only one per netns.
+        * Allowing to move it to another netns is clearly unsafe.
+        */
+       ign->fb_tunnel_dev->features |= NETIF_F_NETNS_LOCAL;
+
 
        ip6gre_fb_tunnel_init(ign->fb_tunnel_dev);
        ign->fb_tunnel_dev->rtnl_link_ops = &ip6gre_link_ops;
@@ -1349,12 +1354,10 @@ err_alloc_dev:
 
 static void __net_exit ip6gre_exit_net(struct net *net)
 {
-       struct ip6gre_net *ign;
        LIST_HEAD(list);
 
-       ign = net_generic(net, ip6gre_net_id);
        rtnl_lock();
-       ip6gre_destroy_tunnels(ign, &list);
+       ip6gre_destroy_tunnels(net, &list);
        unregister_netdevice_many(&list);
        rtnl_unlock();
 }
@@ -1531,15 +1534,14 @@ out:
 static int ip6gre_changelink(struct net_device *dev, struct nlattr *tb[],
                            struct nlattr *data[])
 {
-       struct ip6_tnl *t, *nt;
-       struct net *net = dev_net(dev);
+       struct ip6_tnl *t, *nt = netdev_priv(dev);
+       struct net *net = nt->net;
        struct ip6gre_net *ign = net_generic(net, ip6gre_net_id);
        struct __ip6_tnl_parm p;
 
        if (dev == ign->fb_tunnel_dev)
                return -EINVAL;
 
-       nt = netdev_priv(dev);
        ip6gre_netlink_parms(data, &p);
 
        t = ip6gre_tunnel_locate(net, &p, 0);
index 40e7581374f7006c6f8c436ed686919ac93c2b19..ab0cc57f779ca58a1f5ef509fbac6cff7327bc54 100644 (file)
@@ -219,7 +219,7 @@ int ip6_xmit(struct sock *sk, struct sk_buff *skb, struct flowi6 *fl6,
        skb->mark = sk->sk_mark;
 
        mtu = dst_mtu(dst);
-       if ((skb->len <= mtu) || skb->local_df || skb_is_gso(skb)) {
+       if ((skb->len <= mtu) || skb->ignore_df || skb_is_gso(skb)) {
                IP6_UPD_PO_STATS(net, ip6_dst_idev(skb_dst(skb)),
                              IPSTATS_MIB_OUT, skb->len);
                return NF_HOOK(NFPROTO_IPV6, NF_INET_LOCAL_OUT, skb, NULL,
@@ -344,12 +344,16 @@ static unsigned int ip6_dst_mtu_forward(const struct dst_entry *dst)
 
 static bool ip6_pkt_too_big(const struct sk_buff *skb, unsigned int mtu)
 {
-       if (skb->len <= mtu || skb->local_df)
+       if (skb->len <= mtu)
                return false;
 
+       /* ipv6 conntrack defrag sets max_frag_size + ignore_df */
        if (IP6CB(skb)->frag_max_size && IP6CB(skb)->frag_max_size > mtu)
                return true;
 
+       if (skb->ignore_df)
+               return false;
+
        if (skb_is_gso(skb) && skb_gso_network_seglen(skb) <= mtu)
                return false;
 
@@ -555,7 +559,7 @@ int ip6_fragment(struct sk_buff *skb, int (*output)(struct sk_buff *))
        /* We must not fragment if the socket is set to force MTU discovery
         * or if the skb it not generated by a local socket.
         */
-       if (unlikely(!skb->local_df && skb->len > mtu) ||
+       if (unlikely(!skb->ignore_df && skb->len > mtu) ||
                     (IP6CB(skb)->frag_max_size &&
                      IP6CB(skb)->frag_max_size > mtu)) {
                if (skb->sk && dst_allfrag(skb_dst(skb)))
@@ -1230,7 +1234,7 @@ int ip6_append_data(struct sock *sk, int getfrag(void *from, char *to,
                              sizeof(struct frag_hdr) : 0) +
                             rt->rt6i_nfheader_len;
 
-               if (ip6_sk_local_df(sk))
+               if (ip6_sk_ignore_df(sk))
                        maxnonfragsize = sizeof(struct ipv6hdr) + IPV6_MAXPLEN;
                else
                        maxnonfragsize = mtu;
@@ -1540,7 +1544,7 @@ int ip6_push_pending_frames(struct sock *sk)
        }
 
        /* Allow local fragmentation. */
-       skb->local_df = ip6_sk_local_df(sk);
+       skb->ignore_df = ip6_sk_ignore_df(sk);
 
        *final_dst = fl6->daddr;
        __skb_pull(skb, skb_network_header_len(skb));
index b05b609f69d1cd3e58bd525cb0b5e8b11d429b80..fe61545dde71d0a20c5ea85acc9dc614f414ccdb 100644 (file)
@@ -61,6 +61,7 @@
 MODULE_AUTHOR("Ville Nuorvala");
 MODULE_DESCRIPTION("IPv6 tunneling device");
 MODULE_LICENSE("GPL");
+MODULE_ALIAS_RTNL_LINK("ip6tnl");
 MODULE_ALIAS_NETDEV("ip6tnl0");
 
 #ifdef IP6_TNL_DEBUG
index b7c0f827140b402685cc29049cb56646471c2cf2..2953c0c26c27d4c4c50f78a271af2a98f753e41e 100644 (file)
@@ -792,15 +792,12 @@ static const struct net_device_ops vti6_netdev_ops = {
  **/
 static void vti6_dev_setup(struct net_device *dev)
 {
-       struct ip6_tnl *t;
-
        dev->netdev_ops = &vti6_netdev_ops;
        dev->destructor = vti6_dev_free;
 
        dev->type = ARPHRD_TUNNEL6;
        dev->hard_header_len = LL_MAX_HEADER + sizeof(struct ipv6hdr);
        dev->mtu = ETH_DATA_LEN;
-       t = netdev_priv(dev);
        dev->flags |= IFF_NOARP;
        dev->addr_len = sizeof(struct in6_addr);
        dev->priv_flags &= ~IFF_XMIT_DST_RELEASE;
index 8659067da28e8a8557af6f4109fc52056ccdee1b..8250474ab7dc0e10b3340bca3e68aaf9377a81f2 100644 (file)
@@ -1633,7 +1633,7 @@ struct sock *mroute6_socket(struct net *net, struct sk_buff *skb)
 {
        struct mr6_table *mrt;
        struct flowi6 fl6 = {
-               .flowi6_iif     = skb->skb_iif,
+               .flowi6_iif     = skb->skb_iif ? : LOOPBACK_IFINDEX,
                .flowi6_oif     = skb->dev->ifindex,
                .flowi6_mark    = skb->mark,
        };
index 95f3f1da0d7f2ff20c3afa3eeda315dd9e2e6b5f..d38e6a8d8b9fb82ec7d583a5ab2abc652838d470 100644 (file)
@@ -30,13 +30,15 @@ int ip6_route_me_harder(struct sk_buff *skb)
                .daddr = iph->daddr,
                .saddr = iph->saddr,
        };
+       int err;
 
        dst = ip6_route_output(net, skb->sk, &fl6);
-       if (dst->error) {
+       err = dst->error;
+       if (err) {
                IP6_INC_STATS(net, ip6_dst_idev(dst), IPSTATS_MIB_OUTNOROUTES);
                LIMIT_NETDEBUG(KERN_DEBUG "ip6_route_me_harder: No more route.\n");
                dst_release(dst);
-               return dst->error;
+               return err;
        }
 
        /* Drop old route. */
index e0983f3648a628410c6f6bfd9549ec339a325353..790e0c6b19e1caa41b31c5f279de719581bd6065 100644 (file)
@@ -33,6 +33,7 @@ static bool rpfilter_lookup_reverse6(const struct sk_buff *skb,
        struct ipv6hdr *iph = ipv6_hdr(skb);
        bool ret = false;
        struct flowi6 fl6 = {
+               .flowi6_iif = LOOPBACK_IFINDEX,
                .flowlabel = (* (__be32 *) iph) & IPV6_FLOWINFO_MASK,
                .flowi6_proto = iph->nexthdr,
                .daddr = iph->saddr,
index 767ab8da82189479456c632ecec95eef67da4bae..0d5279fd852a48643b5b0b54834b2ee68a893116 100644 (file)
@@ -451,7 +451,7 @@ nf_ct_frag6_reasm(struct frag_queue *fq, struct net_device *dev)
        }
        sub_frag_mem_limit(&fq->q, head->truesize);
 
-       head->local_df = 1;
+       head->ignore_df = 1;
        head->next = NULL;
        head->dev = dev;
        head->tstamp = fq->q.stamp;
index bda74291c3e0d09c94ff5961cf393cb7695ee518..a2a1d80dfe0c0e3cab9ed1eadd51bb3401797b11 100644 (file)
@@ -168,12 +168,7 @@ int ping_v6_sendmsg(struct kiocb *iocb, struct sock *sk, struct msghdr *msg,
        pfh.wcheck = 0;
        pfh.family = AF_INET6;
 
-       if (ipv6_addr_is_multicast(&fl6.daddr))
-               hlimit = np->mcast_hops;
-       else
-               hlimit = np->hop_limit;
-       if (hlimit < 0)
-               hlimit = ip6_dst_hoplimit(dst);
+       hlimit = ip6_sk_dst_hoplimit(np, &fl6, dst);
 
        lock_sock(sk);
        err = ip6_append_data(sk, ping_getfrag, &pfh, len,
index 091d066a57b3711c5bd0eb5a352573d0aa2f3776..3317440ea34174032a7b0bee73d8f5b36cfb08ee 100644 (file)
@@ -186,7 +186,7 @@ static void snmp6_seq_show_icmpv6msg(struct seq_file *seq, atomic_long_t *smib)
 /* can be called either with percpu mib (pcpumib != NULL),
  * or shared one (smib != NULL)
  */
-static void snmp6_seq_show_item(struct seq_file *seq, void __percpu **pcpumib,
+static void snmp6_seq_show_item(struct seq_file *seq, void __percpu *pcpumib,
                                atomic_long_t *smib,
                                const struct snmp_mib *itemlist)
 {
@@ -201,7 +201,7 @@ static void snmp6_seq_show_item(struct seq_file *seq, void __percpu **pcpumib,
        }
 }
 
-static void snmp6_seq_show_item64(struct seq_file *seq, void __percpu **mib,
+static void snmp6_seq_show_item64(struct seq_file *seq, void __percpu *mib,
                                  const struct snmp_mib *itemlist, size_t syncpoff)
 {
        int i;
@@ -215,14 +215,14 @@ static int snmp6_seq_show(struct seq_file *seq, void *v)
 {
        struct net *net = (struct net *)seq->private;
 
-       snmp6_seq_show_item64(seq, (void __percpu **)net->mib.ipv6_statistics,
+       snmp6_seq_show_item64(seq, net->mib.ipv6_statistics,
                            snmp6_ipstats_list, offsetof(struct ipstats_mib, syncp));
-       snmp6_seq_show_item(seq, (void __percpu **)net->mib.icmpv6_statistics,
+       snmp6_seq_show_item(seq, net->mib.icmpv6_statistics,
                            NULL, snmp6_icmp6_list);
        snmp6_seq_show_icmpv6msg(seq, net->mib.icmpv6msg_statistics->mibs);
-       snmp6_seq_show_item(seq, (void __percpu **)net->mib.udp_stats_in6,
+       snmp6_seq_show_item(seq, net->mib.udp_stats_in6,
                            NULL, snmp6_udp6_list);
-       snmp6_seq_show_item(seq, (void __percpu **)net->mib.udplite_stats_in6,
+       snmp6_seq_show_item(seq, net->mib.udplite_stats_in6,
                            NULL, snmp6_udplite6_list);
        return 0;
 }
@@ -245,7 +245,7 @@ static int snmp6_dev_seq_show(struct seq_file *seq, void *v)
        struct inet6_dev *idev = (struct inet6_dev *)seq->private;
 
        seq_printf(seq, "%-32s\t%u\n", "ifIndex", idev->dev->ifindex);
-       snmp6_seq_show_item64(seq, (void __percpu **)idev->stats.ipv6,
+       snmp6_seq_show_item64(seq, idev->stats.ipv6,
                            snmp6_ipstats_list, offsetof(struct ipstats_mib, syncp));
        snmp6_seq_show_item(seq, NULL, idev->stats.icmpv6dev->mibs,
                            snmp6_icmp6_list);
index 1f29996e368a23e67bfe09d9b2ff10bd9b61805d..dddfb5fa2b7a289f80fcf81d67ff332a678ed398 100644 (file)
@@ -873,14 +873,8 @@ static int rawv6_sendmsg(struct kiocb *iocb, struct sock *sk,
                err = PTR_ERR(dst);
                goto out;
        }
-       if (hlimit < 0) {
-               if (ipv6_addr_is_multicast(&fl6.daddr))
-                       hlimit = np->mcast_hops;
-               else
-                       hlimit = np->hop_limit;
-               if (hlimit < 0)
-                       hlimit = ip6_dst_hoplimit(dst);
-       }
+       if (hlimit < 0)
+               hlimit = ip6_sk_dst_hoplimit(np, &fl6, dst);
 
        if (tclass < 0)
                tclass = np->tclass;
index 4011617cca688850c4d530f2e35d9890203dedaf..aa883afa652dc9469f38ea51c978218981179ac8 100644 (file)
@@ -1176,7 +1176,7 @@ void ip6_update_pmtu(struct sk_buff *skb, struct net *net, __be32 mtu,
 
        memset(&fl6, 0, sizeof(fl6));
        fl6.flowi6_oif = oif;
-       fl6.flowi6_mark = mark;
+       fl6.flowi6_mark = mark ? mark : IP6_REPLY_MARK(net, skb->mark);
        fl6.daddr = iph->daddr;
        fl6.saddr = iph->saddr;
        fl6.flowlabel = ip6_flowinfo(iph);
@@ -1273,6 +1273,7 @@ void ip6_redirect(struct sk_buff *skb, struct net *net, int oif, u32 mark)
        struct flowi6 fl6;
 
        memset(&fl6, 0, sizeof(fl6));
+       fl6.flowi6_iif = LOOPBACK_IFINDEX;
        fl6.flowi6_oif = oif;
        fl6.flowi6_mark = mark;
        fl6.daddr = iph->daddr;
@@ -1294,6 +1295,7 @@ void ip6_redirect_no_header(struct sk_buff *skb, struct net *net, int oif,
        struct flowi6 fl6;
 
        memset(&fl6, 0, sizeof(fl6));
+       fl6.flowi6_iif = LOOPBACK_IFINDEX;
        fl6.flowi6_oif = oif;
        fl6.flowi6_mark = mark;
        fl6.daddr = msg->dest;
@@ -1453,7 +1455,7 @@ static int ip6_dst_gc(struct dst_ops *ops)
                goto out;
 
        net->ipv6.ip6_rt_gc_expire++;
-       fib6_run_gc(net->ipv6.ip6_rt_gc_expire, net, entries > rt_max_size);
+       fib6_run_gc(net->ipv6.ip6_rt_gc_expire, net, true);
        entries = dst_entries_get_slow(ops);
        if (entries < ops->gc_thresh)
                net->ipv6.ip6_rt_gc_expire = rt_gc_timeout>>1;
index e5a453ca302e1e55e4d8e6ca7069f97fbb2b6347..f4380041f5e7b04211d7d73a05cb3989e8a9af94 100644 (file)
@@ -1828,4 +1828,5 @@ xfrm_tunnel_failed:
 module_init(sit_init);
 module_exit(sit_cleanup);
 MODULE_LICENSE("GPL");
+MODULE_ALIAS_RTNL_LINK("sit");
 MODULE_ALIAS_NETDEV("sit0");
index bb53a5e73c1ab67c7a11430488b8418c4edbf98b..a822b880689b5fea5adeed30956afd2328a9c8b9 100644 (file)
@@ -216,6 +216,8 @@ struct sock *cookie_v6_check(struct sock *sk, struct sk_buff *skb)
            ipv6_addr_type(&ireq->ir_v6_rmt_addr) & IPV6_ADDR_LINKLOCAL)
                ireq->ir_iif = inet6_iif(skb);
 
+       ireq->ir_mark = inet_request_mark(sk, skb);
+
        req->expires = 0UL;
        req->num_retrans = 0;
        ireq->ecn_ok            = ecn_ok;
@@ -242,7 +244,7 @@ struct sock *cookie_v6_check(struct sock *sk, struct sk_buff *skb)
                final_p = fl6_update_dst(&fl6, np->opt, &final);
                fl6.saddr = ireq->ir_v6_loc_addr;
                fl6.flowi6_oif = sk->sk_bound_dev_if;
-               fl6.flowi6_mark = sk->sk_mark;
+               fl6.flowi6_mark = ireq->ir_mark;
                fl6.fl6_dport = ireq->ir_rmt_port;
                fl6.fl6_sport = inet_sk(sk)->inet_sport;
                security_req_classify_flow(req, flowi6_to_flowi(&fl6));
index 7f405a168822afab4fa5349317ef43f2ed8e3a0f..058f3eca2e53efd1fe016cfe8450d3ab0a9c13b1 100644 (file)
@@ -38,6 +38,13 @@ static struct ctl_table ipv6_table_template[] = {
                .mode           = 0644,
                .proc_handler   = proc_dointvec
        },
+       {
+               .procname       = "fwmark_reflect",
+               .data           = &init_net.ipv6.sysctl.fwmark_reflect,
+               .maxlen         = sizeof(int),
+               .mode           = 0644,
+               .proc_handler   = proc_dointvec
+       },
        { }
 };
 
index e289830ed6e35a3be4feda700b5b6789dac20292..f07b2abba3592b729248dfce3c06ec34f3741c32 100644 (file)
@@ -340,7 +340,8 @@ static void tcp_v6_err(struct sk_buff *skb, struct inet6_skb_parm *opt,
        struct sock *sk;
        int err;
        struct tcp_sock *tp;
-       __u32 seq;
+       struct request_sock *fastopen;
+       __u32 seq, snd_una;
        struct net *net = dev_net(skb->dev);
 
        sk = inet6_lookup(net, &tcp_hashinfo, &hdr->daddr,
@@ -371,8 +372,11 @@ static void tcp_v6_err(struct sk_buff *skb, struct inet6_skb_parm *opt,
 
        tp = tcp_sk(sk);
        seq = ntohl(th->seq);
+       /* XXX (TFO) - tp->snd_una should be ISN (tcp_create_openreq_child() */
+       fastopen = tp->fastopen_rsk;
+       snd_una = fastopen ? tcp_rsk(fastopen)->snt_isn : tp->snd_una;
        if (sk->sk_state != TCP_LISTEN &&
-           !between(seq, tp->snd_una, tp->snd_nxt)) {
+           !between(seq, snd_una, tp->snd_nxt)) {
                NET_INC_STATS_BH(net, LINUX_MIB_OUTOFWINDOWICMPS);
                goto out;
        }
@@ -436,8 +440,13 @@ static void tcp_v6_err(struct sk_buff *skb, struct inet6_skb_parm *opt,
                goto out;
 
        case TCP_SYN_SENT:
-       case TCP_SYN_RECV:  /* Cannot happen.
-                              It can, it SYNs are crossed. --ANK */
+       case TCP_SYN_RECV:
+               /* Only in fast or simultaneous open. If a fast open socket is
+                * is already accepted it is treated as a connected one below.
+                */
+               if (fastopen && fastopen->sk == NULL)
+                       break;
+
                if (!sock_owned_by_user(sk)) {
                        sk->sk_err = err;
                        sk->sk_error_report(sk);                /* Wake people up to see the error (see connect in sock.c) */
@@ -463,7 +472,8 @@ out:
 static int tcp_v6_send_synack(struct sock *sk, struct dst_entry *dst,
                              struct flowi6 *fl6,
                              struct request_sock *req,
-                             u16 queue_mapping)
+                             u16 queue_mapping,
+                             struct tcp_fastopen_cookie *foc)
 {
        struct inet_request_sock *ireq = inet_rsk(req);
        struct ipv6_pinfo *np = inet6_sk(sk);
@@ -474,7 +484,7 @@ static int tcp_v6_send_synack(struct sock *sk, struct dst_entry *dst,
        if (!dst && (dst = inet6_csk_route_req(sk, fl6, req)) == NULL)
                goto done;
 
-       skb = tcp_make_synack(sk, dst, req, NULL);
+       skb = tcp_make_synack(sk, dst, req, foc);
 
        if (skb) {
                __tcp_v6_send_check(skb, &ireq->ir_v6_loc_addr,
@@ -498,7 +508,7 @@ static int tcp_v6_rtx_synack(struct sock *sk, struct request_sock *req)
        struct flowi6 fl6;
        int res;
 
-       res = tcp_v6_send_synack(sk, NULL, &fl6, req, 0);
+       res = tcp_v6_send_synack(sk, NULL, &fl6, req, 0, NULL);
        if (!res) {
                TCP_INC_STATS_BH(sock_net(sk), TCP_MIB_RETRANSSEGS);
                NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPSYNRETRANS);
@@ -802,6 +812,7 @@ static void tcp_v6_send_response(struct sk_buff *skb, u32 seq, u32 ack, u32 win,
                fl6.flowi6_oif = inet6_iif(skb);
        else
                fl6.flowi6_oif = oif;
+       fl6.flowi6_mark = IP6_REPLY_MARK(net, skb->mark);
        fl6.fl6_dport = t1->dest;
        fl6.fl6_sport = t1->source;
        security_skb_classify_flow(skb, flowi6_to_flowi(&fl6));
@@ -917,7 +928,12 @@ static void tcp_v6_timewait_ack(struct sock *sk, struct sk_buff *skb)
 static void tcp_v6_reqsk_send_ack(struct sock *sk, struct sk_buff *skb,
                                  struct request_sock *req)
 {
-       tcp_v6_send_ack(skb, tcp_rsk(req)->snt_isn + 1, tcp_rsk(req)->rcv_isn + 1,
+       /* sk->sk_state == TCP_LISTEN -> for regular TCP_SYN_RECV
+        * sk->sk_state == TCP_SYN_RECV -> for Fast Open.
+        */
+       tcp_v6_send_ack(skb, (sk->sk_state == TCP_LISTEN) ?
+                       tcp_rsk(req)->snt_isn + 1 : tcp_sk(sk)->snd_nxt,
+                       tcp_rsk(req)->rcv_nxt,
                        req->rcv_wnd, tcp_time_stamp, req->ts_recent, sk->sk_bound_dev_if,
                        tcp_v6_md5_do_lookup(sk, &ipv6_hdr(skb)->daddr),
                        0, 0);
@@ -969,8 +985,10 @@ static int tcp_v6_conn_request(struct sock *sk, struct sk_buff *skb)
        struct tcp_sock *tp = tcp_sk(sk);
        __u32 isn = TCP_SKB_CB(skb)->when;
        struct dst_entry *dst = NULL;
+       struct tcp_fastopen_cookie foc = { .len = -1 };
+       bool want_cookie = false, fastopen;
        struct flowi6 fl6;
-       bool want_cookie = false;
+       int err;
 
        if (skb->protocol == htons(ETH_P_IP))
                return tcp_v4_conn_request(sk, skb);
@@ -1001,7 +1019,7 @@ static int tcp_v6_conn_request(struct sock *sk, struct sk_buff *skb)
        tcp_clear_options(&tmp_opt);
        tmp_opt.mss_clamp = IPV6_MIN_MTU - sizeof(struct tcphdr) - sizeof(struct ipv6hdr);
        tmp_opt.user_mss = tp->rx_opt.user_mss;
-       tcp_parse_options(skb, &tmp_opt, 0, NULL);
+       tcp_parse_options(skb, &tmp_opt, 0, want_cookie ? NULL : &foc);
 
        if (want_cookie && !tmp_opt.saw_tstamp)
                tcp_clear_options(&tmp_opt);
@@ -1016,6 +1034,7 @@ static int tcp_v6_conn_request(struct sock *sk, struct sk_buff *skb)
                TCP_ECN_create_request(req, skb, sock_net(sk));
 
        ireq->ir_iif = sk->sk_bound_dev_if;
+       ireq->ir_mark = inet_request_mark(sk, skb);
 
        /* So that link locals have meaning */
        if (!sk->sk_bound_dev_if &&
@@ -1074,19 +1093,27 @@ static int tcp_v6_conn_request(struct sock *sk, struct sk_buff *skb)
                isn = tcp_v6_init_sequence(skb);
        }
 have_isn:
-       tcp_rsk(req)->snt_isn = isn;
 
        if (security_inet_conn_request(sk, skb, req))
                goto drop_and_release;
 
-       if (tcp_v6_send_synack(sk, dst, &fl6, req,
-                              skb_get_queue_mapping(skb)) ||
-           want_cookie)
+       if (!dst && (dst = inet6_csk_route_req(sk, &fl6, req)) == NULL)
                goto drop_and_free;
 
+       tcp_rsk(req)->snt_isn = isn;
        tcp_rsk(req)->snt_synack = tcp_time_stamp;
-       tcp_rsk(req)->listener = NULL;
-       inet6_csk_reqsk_queue_hash_add(sk, req, TCP_TIMEOUT_INIT);
+       tcp_openreq_init_rwin(req, sk, dst);
+       fastopen = !want_cookie &&
+                  tcp_try_fastopen(sk, skb, req, &foc, dst);
+       err = tcp_v6_send_synack(sk, dst, &fl6, req,
+                                skb_get_queue_mapping(skb), &foc);
+       if (!fastopen) {
+               if (err || want_cookie)
+                       goto drop_and_free;
+
+               tcp_rsk(req)->listener = NULL;
+               inet6_csk_reqsk_queue_hash_add(sk, req, TCP_TIMEOUT_INIT);
+       }
        return 0;
 
 drop_and_release:
@@ -1294,25 +1321,6 @@ out:
        return NULL;
 }
 
-static __sum16 tcp_v6_checksum_init(struct sk_buff *skb)
-{
-       if (skb->ip_summed == CHECKSUM_COMPLETE) {
-               if (!tcp_v6_check(skb->len, &ipv6_hdr(skb)->saddr,
-                                 &ipv6_hdr(skb)->daddr, skb->csum)) {
-                       skb->ip_summed = CHECKSUM_UNNECESSARY;
-                       return 0;
-               }
-       }
-
-       skb->csum = ~csum_unfold(tcp_v6_check(skb->len,
-                                             &ipv6_hdr(skb)->saddr,
-                                             &ipv6_hdr(skb)->daddr, 0));
-
-       if (skb->len <= 76)
-               return __skb_checksum_complete(skb);
-       return 0;
-}
-
 /* The socket must have it's spinlock held when we get
  * here.
  *
@@ -1486,7 +1494,7 @@ static int tcp_v6_rcv(struct sk_buff *skb)
        if (!pskb_may_pull(skb, th->doff*4))
                goto discard_it;
 
-       if (!skb_csum_unnecessary(skb) && tcp_v6_checksum_init(skb))
+       if (skb_checksum_init(skb, IPPROTO_TCP, ip6_compute_pseudo))
                goto csum_error;
 
        th = tcp_hdr(skb);
@@ -1779,6 +1787,7 @@ static void get_tcp6_sock(struct seq_file *seq, struct sock *sp, int i)
        const struct inet_sock *inet = inet_sk(sp);
        const struct tcp_sock *tp = tcp_sk(sp);
        const struct inet_connection_sock *icsk = inet_csk(sp);
+       struct fastopen_queue *fastopenq = icsk->icsk_accept_queue.fastopenq;
 
        dest  = &sp->sk_v6_daddr;
        src   = &sp->sk_v6_rcv_saddr;
@@ -1821,7 +1830,9 @@ static void get_tcp6_sock(struct seq_file *seq, struct sock *sp, int i)
                   jiffies_to_clock_t(icsk->icsk_ack.ato),
                   (icsk->icsk_ack.quick << 1) | icsk->icsk_ack.pingpong,
                   tp->snd_cwnd,
-                  tcp_in_initial_slowstart(tp) ? -1 : tp->snd_ssthresh
+                  sp->sk_state == TCP_LISTEN ?
+                       (fastopenq ? fastopenq->max_qlen : 0) :
+                       (tcp_in_initial_slowstart(tp) ? -1 : tp->snd_ssthresh)
                   );
 }
 
index 1e586d92260e1e75957060b896a8748e9beaa61a..7edf096867c4205342152ea8a5fba6f7ad9a4a66 100644 (file)
@@ -634,6 +634,10 @@ int udpv6_queue_rcv_skb(struct sock *sk, struct sk_buff *skb)
                if (skb->len > sizeof(struct udphdr) && encap_rcv != NULL) {
                        int ret;
 
+                       /* Verify checksum before giving to encap */
+                       if (udp_lib_checksum_complete(skb))
+                               goto csum_error;
+
                        ret = encap_rcv(sk, skb);
                        if (ret <= 0) {
                                UDP_INC_STATS_BH(sock_net(sk),
@@ -760,6 +764,17 @@ static void flush_stack(struct sock **stack, unsigned int count,
        if (unlikely(skb1))
                kfree_skb(skb1);
 }
+
+static void udp6_csum_zero_error(struct sk_buff *skb)
+{
+       /* RFC 2460 section 8.1 says that we SHOULD log
+        * this error. Well, it is reasonable.
+        */
+       LIMIT_NETDEBUG(KERN_INFO "IPv6: udp checksum is 0 for [%pI6c]:%u->[%pI6c]:%u\n",
+                      &ipv6_hdr(skb)->saddr, ntohs(udp_hdr(skb)->source),
+                      &ipv6_hdr(skb)->daddr, ntohs(udp_hdr(skb)->dest));
+}
+
 /*
  * Note: called only from the BH handler context,
  * so we don't need to lock the hashes.
@@ -779,7 +794,12 @@ static int __udp6_lib_mcast_deliver(struct net *net, struct sk_buff *skb,
        dif = inet6_iif(skb);
        sk = udp_v6_mcast_next(net, sk, uh->dest, daddr, uh->source, saddr, dif);
        while (sk) {
-               stack[count++] = sk;
+               /* If zero checksum and sk_no_check is not on for
+                * the socket then skip it.
+                */
+               if (uh->check || sk->sk_no_check)
+                       stack[count++] = sk;
+
                sk = udp_v6_mcast_next(net, sk_nulls_next(sk), uh->dest, daddr,
                                       uh->source, saddr, dif);
                if (unlikely(count == ARRAY_SIZE(stack))) {
@@ -867,6 +887,12 @@ int __udp6_lib_rcv(struct sk_buff *skb, struct udp_table *udptable,
        if (sk != NULL) {
                int ret;
 
+               if (!uh->check && !sk->sk_no_check) {
+                       sock_put(sk);
+                       udp6_csum_zero_error(skb);
+                       goto csum_error;
+               }
+
                ret = udpv6_queue_rcv_skb(sk, skb);
                sock_put(sk);
 
@@ -879,6 +905,11 @@ int __udp6_lib_rcv(struct sk_buff *skb, struct udp_table *udptable,
                return 0;
        }
 
+       if (!uh->check) {
+               udp6_csum_zero_error(skb);
+               goto csum_error;
+       }
+
        if (!xfrm6_policy_check(NULL, XFRM_POLICY_IN, skb))
                goto discard;
 
@@ -1006,7 +1037,10 @@ static int udp_v6_push_pending_frames(struct sock *sk)
 
        if (is_udplite)
                csum = udplite_csum_outgoing(sk, skb);
-       else if (skb->ip_summed == CHECKSUM_PARTIAL) { /* UDP hardware csum */
+       else if (sk->sk_no_check == UDP_CSUM_NOXMIT) {   /* UDP csum disabled */
+               skb->ip_summed = CHECKSUM_NONE;
+               goto send;
+       } else if (skb->ip_summed == CHECKSUM_PARTIAL) { /* UDP hardware csum */
                udp6_hwcsum_outgoing(sk, skb, &fl6->saddr, &fl6->daddr,
                                     up->len);
                goto send;
@@ -1232,14 +1266,8 @@ do_udp_sendmsg:
                goto out;
        }
 
-       if (hlimit < 0) {
-               if (ipv6_addr_is_multicast(&fl6.daddr))
-                       hlimit = np->mcast_hops;
-               else
-                       hlimit = np->hop_limit;
-               if (hlimit < 0)
-                       hlimit = ip6_dst_hoplimit(dst);
-       }
+       if (hlimit < 0)
+               hlimit = ip6_sk_dst_hoplimit(np, &fl6, dst);
 
        if (tclass < 0)
                tclass = np->tclass;
index 19ef329bdbf8e7418fa1d352bb6c90218935831e..f47c8b153dd355a379bf4aa83ecc79b58871aff3 100644 (file)
@@ -78,7 +78,7 @@ static int xfrm6_tunnel_check_size(struct sk_buff *skb)
        if (mtu < IPV6_MIN_MTU)
                mtu = IPV6_MIN_MTU;
 
-       if (!skb->local_df && skb->len > mtu) {
+       if (!skb->ignore_df && skb->len > mtu) {
                skb->dev = dst->dev;
 
                if (xfrm6_local_dontfrag(skb))
@@ -120,7 +120,7 @@ int xfrm6_prepare_output(struct xfrm_state *x, struct sk_buff *skb)
 #endif
 
        skb->protocol = htons(ETH_P_IPV6);
-       skb->local_df = 1;
+       skb->ignore_df = 1;
 
        return x->outer_mode->output2(x, skb);
 }
@@ -150,7 +150,7 @@ static int __xfrm6_output(struct sk_buff *skb)
        if (skb->len > mtu && xfrm6_local_dontfrag(skb)) {
                xfrm6_local_rxpmtu(skb, mtu);
                return -EMSGSIZE;
-       } else if (!skb->local_df && skb->len > mtu && skb->sk) {
+       } else if (!skb->ignore_df && skb->len > mtu && skb->sk) {
                xfrm_local_error(skb, mtu);
                return -EMSGSIZE;
        }
index a4e37d7158dcca42455a04eaf0460a48e39242fd..ed0716a075ba928f5d4c8942eeed9839d3389f3f 100644 (file)
@@ -495,52 +495,6 @@ out:
        spin_unlock_bh(&session->reorder_q.lock);
 }
 
-static inline int l2tp_verify_udp_checksum(struct sock *sk,
-                                          struct sk_buff *skb)
-{
-       struct udphdr *uh = udp_hdr(skb);
-       u16 ulen = ntohs(uh->len);
-       __wsum psum;
-
-       if (sk->sk_no_check || skb_csum_unnecessary(skb))
-               return 0;
-
-#if IS_ENABLED(CONFIG_IPV6)
-       if (sk->sk_family == PF_INET6 && !l2tp_tunnel(sk)->v4mapped) {
-               if (!uh->check) {
-                       LIMIT_NETDEBUG(KERN_INFO "L2TP: IPv6: checksum is 0\n");
-                       return 1;
-               }
-               if ((skb->ip_summed == CHECKSUM_COMPLETE) &&
-                   !csum_ipv6_magic(&ipv6_hdr(skb)->saddr,
-                                    &ipv6_hdr(skb)->daddr, ulen,
-                                    IPPROTO_UDP, skb->csum)) {
-                       skb->ip_summed = CHECKSUM_UNNECESSARY;
-                       return 0;
-               }
-               skb->csum = ~csum_unfold(csum_ipv6_magic(&ipv6_hdr(skb)->saddr,
-                                                        &ipv6_hdr(skb)->daddr,
-                                                        skb->len, IPPROTO_UDP,
-                                                        0));
-       } else
-#endif
-       {
-               struct inet_sock *inet;
-               if (!uh->check)
-                       return 0;
-               inet = inet_sk(sk);
-               psum = csum_tcpudp_nofold(inet->inet_saddr, inet->inet_daddr,
-                                         ulen, IPPROTO_UDP, 0);
-
-               if ((skb->ip_summed == CHECKSUM_COMPLETE) &&
-                   !csum_fold(csum_add(psum, skb->csum)))
-                       return 0;
-               skb->csum = psum;
-       }
-
-       return __skb_checksum_complete(skb);
-}
-
 static int l2tp_seq_check_rx_window(struct l2tp_session *session, u32 nr)
 {
        u32 nws;
@@ -895,8 +849,7 @@ static int l2tp_udp_recv_core(struct l2tp_tunnel *tunnel, struct sk_buff *skb,
        u16 version;
        int length;
 
-       if (tunnel->sock && l2tp_verify_udp_checksum(tunnel->sock, skb))
-               goto discard_bad_csum;
+       /* UDP has verifed checksum */
 
        /* UDP always verifies the packet length. */
        __skb_pull(skb, sizeof(struct udphdr));
@@ -979,14 +932,6 @@ static int l2tp_udp_recv_core(struct l2tp_tunnel *tunnel, struct sk_buff *skb,
 
        return 0;
 
-discard_bad_csum:
-       LIMIT_NETDEBUG("%s: UDP: bad checksum\n", tunnel->name);
-       UDP_INC_STATS_USER(tunnel->l2tp_net, UDP_MIB_INERRORS, 0);
-       atomic_long_inc(&tunnel->stats.rx_errors);
-       kfree_skb(skb);
-
-       return 0;
-
 error:
        /* Put UDP header back */
        __skb_push(skb, sizeof(struct udphdr));
@@ -1128,7 +1073,7 @@ static int l2tp_xmit_core(struct l2tp_session *session, struct sk_buff *skb,
        }
 
        /* Queue the packet to IP for output */
-       skb->local_df = 1;
+       skb->ignore_df = 1;
 #if IS_ENABLED(CONFIG_IPV6)
        if (tunnel->sock->sk_family == PF_INET6 && !tunnel->v4mapped)
                error = inet6_csk_xmit(tunnel->sock, skb, NULL);
index 7704ea9502fdc9e49a2b8bb7fda4b9b9ec8d1722..e472d44a3b91eb74e5614d22c4c7c68811ca4a84 100644 (file)
@@ -605,14 +605,8 @@ static int l2tp_ip6_sendmsg(struct kiocb *iocb, struct sock *sk,
                goto out;
        }
 
-       if (hlimit < 0) {
-               if (ipv6_addr_is_multicast(&fl6.daddr))
-                       hlimit = np->mcast_hops;
-               else
-                       hlimit = np->hop_limit;
-               if (hlimit < 0)
-                       hlimit = ip6_dst_hoplimit(dst);
-       }
+       if (hlimit < 0)
+               hlimit = ip6_sk_dst_hoplimit(np, &fl6, dst);
 
        if (tclass < 0)
                tclass = np->tclass;
index 7c7df475a401693dd44cb1a6b9d68dd255b2acd5..ec24378caaafaf333152e856aa0e2e920ddbb13f 100644 (file)
@@ -23,12 +23,13 @@ void ieee80211_aes_ccm_encrypt(struct crypto_aead *tfm, u8 *b_0, u8 *aad,
                               u8 *data, size_t data_len, u8 *mic)
 {
        struct scatterlist assoc, pt, ct[2];
-       struct {
-               struct aead_request     req;
-               u8                      priv[crypto_aead_reqsize(tfm)];
-       } aead_req;
 
-       memset(&aead_req, 0, sizeof(aead_req));
+       char aead_req_data[sizeof(struct aead_request) +
+                          crypto_aead_reqsize(tfm)]
+               __aligned(__alignof__(struct aead_request));
+       struct aead_request *aead_req = (void *) aead_req_data;
+
+       memset(aead_req, 0, sizeof(aead_req_data));
 
        sg_init_one(&pt, data, data_len);
        sg_init_one(&assoc, &aad[2], be16_to_cpup((__be16 *)aad));
@@ -36,23 +37,23 @@ void ieee80211_aes_ccm_encrypt(struct crypto_aead *tfm, u8 *b_0, u8 *aad,
        sg_set_buf(&ct[0], data, data_len);
        sg_set_buf(&ct[1], mic, IEEE80211_CCMP_MIC_LEN);
 
-       aead_request_set_tfm(&aead_req.req, tfm);
-       aead_request_set_assoc(&aead_req.req, &assoc, assoc.length);
-       aead_request_set_crypt(&aead_req.req, &pt, ct, data_len, b_0);
+       aead_request_set_tfm(aead_req, tfm);
+       aead_request_set_assoc(aead_req, &assoc, assoc.length);
+       aead_request_set_crypt(aead_req, &pt, ct, data_len, b_0);
 
-       crypto_aead_encrypt(&aead_req.req);
+       crypto_aead_encrypt(aead_req);
 }
 
 int ieee80211_aes_ccm_decrypt(struct crypto_aead *tfm, u8 *b_0, u8 *aad,
                              u8 *data, size_t data_len, u8 *mic)
 {
        struct scatterlist assoc, pt, ct[2];
-       struct {
-               struct aead_request     req;
-               u8                      priv[crypto_aead_reqsize(tfm)];
-       } aead_req;
+       char aead_req_data[sizeof(struct aead_request) +
+                          crypto_aead_reqsize(tfm)]
+               __aligned(__alignof__(struct aead_request));
+       struct aead_request *aead_req = (void *) aead_req_data;
 
-       memset(&aead_req, 0, sizeof(aead_req));
+       memset(aead_req, 0, sizeof(aead_req_data));
 
        sg_init_one(&pt, data, data_len);
        sg_init_one(&assoc, &aad[2], be16_to_cpup((__be16 *)aad));
@@ -60,12 +61,12 @@ int ieee80211_aes_ccm_decrypt(struct crypto_aead *tfm, u8 *b_0, u8 *aad,
        sg_set_buf(&ct[0], data, data_len);
        sg_set_buf(&ct[1], mic, IEEE80211_CCMP_MIC_LEN);
 
-       aead_request_set_tfm(&aead_req.req, tfm);
-       aead_request_set_assoc(&aead_req.req, &assoc, assoc.length);
-       aead_request_set_crypt(&aead_req.req, ct, &pt,
+       aead_request_set_tfm(aead_req, tfm);
+       aead_request_set_assoc(aead_req, &assoc, assoc.length);
+       aead_request_set_crypt(aead_req, ct, &pt,
                               data_len + IEEE80211_CCMP_MIC_LEN, b_0);
 
-       return crypto_aead_decrypt(&aead_req.req);
+       return crypto_aead_decrypt(aead_req);
 }
 
 struct crypto_aead *ieee80211_aes_key_setup_encrypt(const u8 key[])
index aaa59d719592c0b7dc6ef3ddb4df8aaa578bc45c..7b8d3cf8957407f2decb1c87a3307e45055b9213 100644 (file)
@@ -109,6 +109,15 @@ static int ieee80211_change_iface(struct wiphy *wiphy,
 static int ieee80211_start_p2p_device(struct wiphy *wiphy,
                                      struct wireless_dev *wdev)
 {
+       struct ieee80211_sub_if_data *sdata = IEEE80211_WDEV_TO_SUB_IF(wdev);
+       int ret;
+
+       mutex_lock(&sdata->local->chanctx_mtx);
+       ret = ieee80211_check_combinations(sdata, NULL, 0, 0);
+       mutex_unlock(&sdata->local->chanctx_mtx);
+       if (ret < 0)
+               return ret;
+
        return ieee80211_do_open(wdev, true);
 }
 
@@ -972,13 +981,13 @@ static int ieee80211_start_ap(struct wiphy *wiphy, struct net_device *dev,
        sdata->needed_rx_chains = sdata->local->rx_chains;
 
        mutex_lock(&local->mtx);
-       sdata->radar_required = params->radar_required;
        err = ieee80211_vif_use_channel(sdata, &params->chandef,
                                        IEEE80211_CHANCTX_SHARED);
+       if (!err)
+               ieee80211_vif_copy_chanctx_to_vlans(sdata, false);
        mutex_unlock(&local->mtx);
        if (err)
                return err;
-       ieee80211_vif_copy_chanctx_to_vlans(sdata, false);
 
        /*
         * Apply control port protocol, this allows us to
@@ -1131,8 +1140,8 @@ static int ieee80211_stop_ap(struct wiphy *wiphy, struct net_device *dev)
        local->total_ps_buffered -= skb_queue_len(&sdata->u.ap.ps.bc_buf);
        skb_queue_purge(&sdata->u.ap.ps.bc_buf);
 
-       ieee80211_vif_copy_chanctx_to_vlans(sdata, true);
        mutex_lock(&local->mtx);
+       ieee80211_vif_copy_chanctx_to_vlans(sdata, true);
        ieee80211_vif_release_channel(sdata);
        mutex_unlock(&local->mtx);
 
@@ -1566,7 +1575,7 @@ static int ieee80211_change_station(struct wiphy *wiphy,
 
                if (sta->sdata->vif.type == NL80211_IFTYPE_AP_VLAN &&
                    sta->sdata->u.vlan.sta) {
-                       rcu_assign_pointer(sta->sdata->u.vlan.sta, NULL);
+                       RCU_INIT_POINTER(sta->sdata->u.vlan.sta, NULL);
                        prev_4addr = true;
                }
 
@@ -2930,7 +2939,6 @@ static int ieee80211_start_radar_detection(struct wiphy *wiphy,
        /* whatever, but channel contexts should not complain about that one */
        sdata->smps_mode = IEEE80211_SMPS_OFF;
        sdata->needed_rx_chains = local->rx_chains;
-       sdata->radar_required = true;
 
        err = ieee80211_vif_use_channel(sdata, chandef,
                                        IEEE80211_CHANCTX_SHARED);
@@ -3217,7 +3225,7 @@ int ieee80211_channel_switch(struct wiphy *wiphy, struct net_device *dev,
 {
        struct ieee80211_sub_if_data *sdata = IEEE80211_DEV_TO_SUB_IF(dev);
        struct ieee80211_local *local = sdata->local;
-       struct ieee80211_chanctx_conf *chanctx_conf;
+       struct ieee80211_chanctx_conf *conf;
        struct ieee80211_chanctx *chanctx;
        int err, num_chanctx, changed = 0;
 
@@ -3233,23 +3241,24 @@ int ieee80211_channel_switch(struct wiphy *wiphy, struct net_device *dev,
                                       &sdata->vif.bss_conf.chandef))
                return -EINVAL;
 
-       rcu_read_lock();
-       chanctx_conf = rcu_dereference(sdata->vif.chanctx_conf);
-       if (!chanctx_conf) {
-               rcu_read_unlock();
+       mutex_lock(&local->chanctx_mtx);
+       conf = rcu_dereference_protected(sdata->vif.chanctx_conf,
+                                        lockdep_is_held(&local->chanctx_mtx));
+       if (!conf) {
+               mutex_unlock(&local->chanctx_mtx);
                return -EBUSY;
        }
 
        /* don't handle for multi-VIF cases */
-       chanctx = container_of(chanctx_conf, struct ieee80211_chanctx, conf);
-       if (chanctx->refcount > 1) {
-               rcu_read_unlock();
+       chanctx = container_of(conf, struct ieee80211_chanctx, conf);
+       if (ieee80211_chanctx_refcount(local, chanctx) > 1) {
+               mutex_unlock(&local->chanctx_mtx);
                return -EBUSY;
        }
        num_chanctx = 0;
        list_for_each_entry_rcu(chanctx, &local->chanctx_list, list)
                num_chanctx++;
-       rcu_read_unlock();
+       mutex_unlock(&local->chanctx_mtx);
 
        if (num_chanctx > 1)
                return -EBUSY;
@@ -3949,6 +3958,21 @@ static int ieee80211_set_qos_map(struct wiphy *wiphy,
        return 0;
 }
 
+static int ieee80211_set_ap_chanwidth(struct wiphy *wiphy,
+                                     struct net_device *dev,
+                                     struct cfg80211_chan_def *chandef)
+{
+       struct ieee80211_sub_if_data *sdata = IEEE80211_DEV_TO_SUB_IF(dev);
+       int ret;
+       u32 changed = 0;
+
+       ret = ieee80211_vif_change_bandwidth(sdata, chandef, &changed);
+       if (ret == 0)
+               ieee80211_bss_info_change_notify(sdata, changed);
+
+       return ret;
+}
+
 const struct cfg80211_ops mac80211_config_ops = {
        .add_virtual_intf = ieee80211_add_iface,
        .del_virtual_intf = ieee80211_del_iface,
@@ -4029,4 +4053,5 @@ const struct cfg80211_ops mac80211_config_ops = {
        .start_radar_detection = ieee80211_start_radar_detection,
        .channel_switch = ieee80211_channel_switch,
        .set_qos_map = ieee80211_set_qos_map,
+       .set_ap_chanwidth = ieee80211_set_ap_chanwidth,
 };
index 75b5dd2c9267f10e8cb0e5680c3aa11c94a5dbe4..48e6d6f010cd0f26acd379fea0c601bf8d67e33f 100644 (file)
@@ -9,6 +9,170 @@
 #include "ieee80211_i.h"
 #include "driver-ops.h"
 
+static int ieee80211_chanctx_num_assigned(struct ieee80211_local *local,
+                                         struct ieee80211_chanctx *ctx)
+{
+       struct ieee80211_sub_if_data *sdata;
+       int num = 0;
+
+       lockdep_assert_held(&local->chanctx_mtx);
+
+       list_for_each_entry(sdata, &ctx->assigned_vifs, assigned_chanctx_list)
+               num++;
+
+       return num;
+}
+
+static int ieee80211_chanctx_num_reserved(struct ieee80211_local *local,
+                                         struct ieee80211_chanctx *ctx)
+{
+       struct ieee80211_sub_if_data *sdata;
+       int num = 0;
+
+       lockdep_assert_held(&local->chanctx_mtx);
+
+       list_for_each_entry(sdata, &ctx->reserved_vifs, reserved_chanctx_list)
+               num++;
+
+       return num;
+}
+
+int ieee80211_chanctx_refcount(struct ieee80211_local *local,
+                              struct ieee80211_chanctx *ctx)
+{
+       return ieee80211_chanctx_num_assigned(local, ctx) +
+              ieee80211_chanctx_num_reserved(local, ctx);
+}
+
+static int ieee80211_num_chanctx(struct ieee80211_local *local)
+{
+       struct ieee80211_chanctx *ctx;
+       int num = 0;
+
+       lockdep_assert_held(&local->chanctx_mtx);
+
+       list_for_each_entry(ctx, &local->chanctx_list, list)
+               num++;
+
+       return num;
+}
+
+static bool ieee80211_can_create_new_chanctx(struct ieee80211_local *local)
+{
+       lockdep_assert_held(&local->chanctx_mtx);
+       return ieee80211_num_chanctx(local) < ieee80211_max_num_channels(local);
+}
+
+static const struct cfg80211_chan_def *
+ieee80211_chanctx_reserved_chandef(struct ieee80211_local *local,
+                                  struct ieee80211_chanctx *ctx,
+                                  const struct cfg80211_chan_def *compat)
+{
+       struct ieee80211_sub_if_data *sdata;
+
+       lockdep_assert_held(&local->chanctx_mtx);
+
+       list_for_each_entry(sdata, &ctx->reserved_vifs,
+                           reserved_chanctx_list) {
+               if (!compat)
+                       compat = &sdata->reserved_chandef;
+
+               compat = cfg80211_chandef_compatible(&sdata->reserved_chandef,
+                                                    compat);
+               if (!compat)
+                       break;
+       }
+
+       return compat;
+}
+
+static const struct cfg80211_chan_def *
+ieee80211_chanctx_non_reserved_chandef(struct ieee80211_local *local,
+                                      struct ieee80211_chanctx *ctx,
+                                      const struct cfg80211_chan_def *compat)
+{
+       struct ieee80211_sub_if_data *sdata;
+
+       lockdep_assert_held(&local->chanctx_mtx);
+
+       list_for_each_entry(sdata, &ctx->assigned_vifs,
+                           assigned_chanctx_list) {
+               if (sdata->reserved_chanctx != NULL)
+                       continue;
+
+               if (!compat)
+                       compat = &sdata->vif.bss_conf.chandef;
+
+               compat = cfg80211_chandef_compatible(
+                               &sdata->vif.bss_conf.chandef, compat);
+               if (!compat)
+                       break;
+       }
+
+       return compat;
+}
+
+static const struct cfg80211_chan_def *
+ieee80211_chanctx_combined_chandef(struct ieee80211_local *local,
+                                  struct ieee80211_chanctx *ctx,
+                                  const struct cfg80211_chan_def *compat)
+{
+       lockdep_assert_held(&local->chanctx_mtx);
+
+       compat = ieee80211_chanctx_reserved_chandef(local, ctx, compat);
+       if (!compat)
+               return NULL;
+
+       compat = ieee80211_chanctx_non_reserved_chandef(local, ctx, compat);
+       if (!compat)
+               return NULL;
+
+       return compat;
+}
+
+static bool
+ieee80211_chanctx_can_reserve_chandef(struct ieee80211_local *local,
+                                     struct ieee80211_chanctx *ctx,
+                                     const struct cfg80211_chan_def *def)
+{
+       lockdep_assert_held(&local->chanctx_mtx);
+
+       if (ieee80211_chanctx_combined_chandef(local, ctx, def))
+               return true;
+
+       if (!list_empty(&ctx->reserved_vifs) &&
+           ieee80211_chanctx_reserved_chandef(local, ctx, def))
+               return true;
+
+       return false;
+}
+
+static struct ieee80211_chanctx *
+ieee80211_find_reservation_chanctx(struct ieee80211_local *local,
+                                  const struct cfg80211_chan_def *chandef,
+                                  enum ieee80211_chanctx_mode mode)
+{
+       struct ieee80211_chanctx *ctx;
+
+       lockdep_assert_held(&local->chanctx_mtx);
+
+       if (mode == IEEE80211_CHANCTX_EXCLUSIVE)
+               return NULL;
+
+       list_for_each_entry(ctx, &local->chanctx_list, list) {
+               if (ctx->mode == IEEE80211_CHANCTX_EXCLUSIVE)
+                       continue;
+
+               if (!ieee80211_chanctx_can_reserve_chandef(local, ctx,
+                                                          chandef))
+                       continue;
+
+               return ctx;
+       }
+
+       return NULL;
+}
+
 static enum nl80211_chan_width ieee80211_get_sta_bw(struct ieee80211_sta *sta)
 {
        switch (sta->bandwidth) {
@@ -190,6 +354,11 @@ ieee80211_find_chanctx(struct ieee80211_local *local,
                if (!compat)
                        continue;
 
+               compat = ieee80211_chanctx_reserved_chandef(local, ctx,
+                                                           compat);
+               if (!compat)
+                       continue;
+
                ieee80211_change_chanctx(local, ctx, compat);
 
                return ctx;
@@ -217,62 +386,91 @@ static bool ieee80211_is_radar_required(struct ieee80211_local *local)
 }
 
 static struct ieee80211_chanctx *
-ieee80211_new_chanctx(struct ieee80211_local *local,
-                     const struct cfg80211_chan_def *chandef,
-                     enum ieee80211_chanctx_mode mode)
+ieee80211_alloc_chanctx(struct ieee80211_local *local,
+                       const struct cfg80211_chan_def *chandef,
+                       enum ieee80211_chanctx_mode mode)
 {
        struct ieee80211_chanctx *ctx;
-       u32 changed;
-       int err;
 
        lockdep_assert_held(&local->chanctx_mtx);
 
        ctx = kzalloc(sizeof(*ctx) + local->hw.chanctx_data_size, GFP_KERNEL);
        if (!ctx)
-               return ERR_PTR(-ENOMEM);
+               return NULL;
 
+       INIT_LIST_HEAD(&ctx->assigned_vifs);
+       INIT_LIST_HEAD(&ctx->reserved_vifs);
        ctx->conf.def = *chandef;
        ctx->conf.rx_chains_static = 1;
        ctx->conf.rx_chains_dynamic = 1;
        ctx->mode = mode;
        ctx->conf.radar_enabled = ieee80211_is_radar_required(local);
        ieee80211_recalc_chanctx_min_def(local, ctx);
+
+       return ctx;
+}
+
+static int ieee80211_add_chanctx(struct ieee80211_local *local,
+                                struct ieee80211_chanctx *ctx)
+{
+       u32 changed;
+       int err;
+
+       lockdep_assert_held(&local->mtx);
+       lockdep_assert_held(&local->chanctx_mtx);
+
        if (!local->use_chanctx)
                local->hw.conf.radar_enabled = ctx->conf.radar_enabled;
 
-       /* we hold the mutex to prevent idle from changing */
-       lockdep_assert_held(&local->mtx);
        /* turn idle off *before* setting channel -- some drivers need that */
        changed = ieee80211_idle_off(local);
        if (changed)
                ieee80211_hw_config(local, changed);
 
        if (!local->use_chanctx) {
-               local->_oper_chandef = *chandef;
+               local->_oper_chandef = ctx->conf.def;
                ieee80211_hw_config(local, IEEE80211_CONF_CHANGE_CHANNEL);
        } else {
                err = drv_add_chanctx(local, ctx);
                if (err) {
-                       kfree(ctx);
                        ieee80211_recalc_idle(local);
-                       return ERR_PTR(err);
+                       return err;
                }
        }
 
-       /* and keep the mutex held until the new chanctx is on the list */
-       list_add_rcu(&ctx->list, &local->chanctx_list);
+       return 0;
+}
+
+static struct ieee80211_chanctx *
+ieee80211_new_chanctx(struct ieee80211_local *local,
+                     const struct cfg80211_chan_def *chandef,
+                     enum ieee80211_chanctx_mode mode)
+{
+       struct ieee80211_chanctx *ctx;
+       int err;
+
+       lockdep_assert_held(&local->mtx);
+       lockdep_assert_held(&local->chanctx_mtx);
 
+       ctx = ieee80211_alloc_chanctx(local, chandef, mode);
+       if (!ctx)
+               return ERR_PTR(-ENOMEM);
+
+       err = ieee80211_add_chanctx(local, ctx);
+       if (err) {
+               kfree(ctx);
+               return ERR_PTR(err);
+       }
+
+       list_add_rcu(&ctx->list, &local->chanctx_list);
        return ctx;
 }
 
-static void ieee80211_free_chanctx(struct ieee80211_local *local,
-                                  struct ieee80211_chanctx *ctx)
+static void ieee80211_del_chanctx(struct ieee80211_local *local,
+                                 struct ieee80211_chanctx *ctx)
 {
-       bool check_single_channel = false;
        lockdep_assert_held(&local->chanctx_mtx);
 
-       WARN_ON_ONCE(ctx->refcount != 0);
-
        if (!local->use_chanctx) {
                struct cfg80211_chan_def *chandef = &local->_oper_chandef;
                chandef->width = NL80211_CHAN_WIDTH_20_NOHT;
@@ -282,8 +480,9 @@ static void ieee80211_free_chanctx(struct ieee80211_local *local,
                /* NOTE: Disabling radar is only valid here for
                 * single channel context. To be sure, check it ...
                 */
-               if (local->hw.conf.radar_enabled)
-                       check_single_channel = true;
+               WARN_ON(local->hw.conf.radar_enabled &&
+                       !list_empty(&local->chanctx_list));
+
                local->hw.conf.radar_enabled = false;
 
                ieee80211_hw_config(local, IEEE80211_CONF_CHANGE_CHANNEL);
@@ -291,39 +490,19 @@ static void ieee80211_free_chanctx(struct ieee80211_local *local,
                drv_remove_chanctx(local, ctx);
        }
 
-       list_del_rcu(&ctx->list);
-       kfree_rcu(ctx, rcu_head);
-
-       /* throw a warning if this wasn't the only channel context. */
-       WARN_ON(check_single_channel && !list_empty(&local->chanctx_list));
-
        ieee80211_recalc_idle(local);
 }
 
-static int ieee80211_assign_vif_chanctx(struct ieee80211_sub_if_data *sdata,
-                                       struct ieee80211_chanctx *ctx)
+static void ieee80211_free_chanctx(struct ieee80211_local *local,
+                                  struct ieee80211_chanctx *ctx)
 {
-       struct ieee80211_local *local = sdata->local;
-       int ret;
-
        lockdep_assert_held(&local->chanctx_mtx);
 
-       ret = drv_assign_vif_chanctx(local, sdata, ctx);
-       if (ret)
-               return ret;
-
-       rcu_assign_pointer(sdata->vif.chanctx_conf, &ctx->conf);
-       ctx->refcount++;
-
-       ieee80211_recalc_txpower(sdata);
-       ieee80211_recalc_chanctx_min_def(local, ctx);
-       sdata->vif.bss_conf.idle = false;
+       WARN_ON_ONCE(ieee80211_chanctx_refcount(local, ctx) != 0);
 
-       if (sdata->vif.type != NL80211_IFTYPE_P2P_DEVICE &&
-           sdata->vif.type != NL80211_IFTYPE_MONITOR)
-               ieee80211_bss_info_change_notify(sdata, BSS_CHANGED_IDLE);
-
-       return 0;
+       list_del_rcu(&ctx->list);
+       ieee80211_del_chanctx(local, ctx);
+       kfree_rcu(ctx, rcu_head);
 }
 
 static void ieee80211_recalc_chanctx_chantype(struct ieee80211_local *local,
@@ -384,30 +563,58 @@ static void ieee80211_recalc_radar_chanctx(struct ieee80211_local *local,
        drv_change_chanctx(local, chanctx, IEEE80211_CHANCTX_CHANGE_RADAR);
 }
 
-static void ieee80211_unassign_vif_chanctx(struct ieee80211_sub_if_data *sdata,
-                                          struct ieee80211_chanctx *ctx)
+static int ieee80211_assign_vif_chanctx(struct ieee80211_sub_if_data *sdata,
+                                       struct ieee80211_chanctx *new_ctx)
 {
        struct ieee80211_local *local = sdata->local;
+       struct ieee80211_chanctx_conf *conf;
+       struct ieee80211_chanctx *curr_ctx = NULL;
+       int ret = 0;
 
-       lockdep_assert_held(&local->chanctx_mtx);
+       conf = rcu_dereference_protected(sdata->vif.chanctx_conf,
+                                        lockdep_is_held(&local->chanctx_mtx));
 
-       ctx->refcount--;
-       rcu_assign_pointer(sdata->vif.chanctx_conf, NULL);
+       if (conf) {
+               curr_ctx = container_of(conf, struct ieee80211_chanctx, conf);
 
-       sdata->vif.bss_conf.idle = true;
+               drv_unassign_vif_chanctx(local, sdata, curr_ctx);
+               conf = NULL;
+               list_del(&sdata->assigned_chanctx_list);
+       }
 
-       if (sdata->vif.type != NL80211_IFTYPE_P2P_DEVICE &&
-           sdata->vif.type != NL80211_IFTYPE_MONITOR)
-               ieee80211_bss_info_change_notify(sdata, BSS_CHANGED_IDLE);
+       if (new_ctx) {
+               ret = drv_assign_vif_chanctx(local, sdata, new_ctx);
+               if (ret)
+                       goto out;
 
-       drv_unassign_vif_chanctx(local, sdata, ctx);
+               conf = &new_ctx->conf;
+               list_add(&sdata->assigned_chanctx_list,
+                        &new_ctx->assigned_vifs);
+       }
 
-       if (ctx->refcount > 0) {
-               ieee80211_recalc_chanctx_chantype(sdata->local, ctx);
-               ieee80211_recalc_smps_chanctx(local, ctx);
-               ieee80211_recalc_radar_chanctx(local, ctx);
-               ieee80211_recalc_chanctx_min_def(local, ctx);
+out:
+       rcu_assign_pointer(sdata->vif.chanctx_conf, conf);
+
+       sdata->vif.bss_conf.idle = !conf;
+
+       if (curr_ctx && ieee80211_chanctx_num_assigned(local, curr_ctx) > 0) {
+               ieee80211_recalc_chanctx_chantype(local, curr_ctx);
+               ieee80211_recalc_smps_chanctx(local, curr_ctx);
+               ieee80211_recalc_radar_chanctx(local, curr_ctx);
+               ieee80211_recalc_chanctx_min_def(local, curr_ctx);
        }
+
+       if (new_ctx && ieee80211_chanctx_num_assigned(local, new_ctx) > 0) {
+               ieee80211_recalc_txpower(sdata);
+               ieee80211_recalc_chanctx_min_def(local, new_ctx);
+       }
+
+       if (sdata->vif.type != NL80211_IFTYPE_P2P_DEVICE &&
+           sdata->vif.type != NL80211_IFTYPE_MONITOR)
+               ieee80211_bss_info_change_notify(sdata,
+                                                BSS_CHANGED_IDLE);
+
+       return ret;
 }
 
 static void __ieee80211_vif_release_channel(struct ieee80211_sub_if_data *sdata)
@@ -425,8 +632,11 @@ static void __ieee80211_vif_release_channel(struct ieee80211_sub_if_data *sdata)
 
        ctx = container_of(conf, struct ieee80211_chanctx, conf);
 
-       ieee80211_unassign_vif_chanctx(sdata, ctx);
-       if (ctx->refcount == 0)
+       if (sdata->reserved_chanctx)
+               ieee80211_vif_unreserve_chanctx(sdata);
+
+       ieee80211_assign_vif_chanctx(sdata, NULL);
+       if (ieee80211_chanctx_refcount(local, ctx) == 0)
                ieee80211_free_chanctx(local, ctx);
 }
 
@@ -526,6 +736,7 @@ int ieee80211_vif_use_channel(struct ieee80211_sub_if_data *sdata,
 {
        struct ieee80211_local *local = sdata->local;
        struct ieee80211_chanctx *ctx;
+       u8 radar_detect_width = 0;
        int ret;
 
        lockdep_assert_held(&local->mtx);
@@ -533,6 +744,22 @@ int ieee80211_vif_use_channel(struct ieee80211_sub_if_data *sdata,
        WARN_ON(sdata->dev && netif_carrier_ok(sdata->dev));
 
        mutex_lock(&local->chanctx_mtx);
+
+       ret = cfg80211_chandef_dfs_required(local->hw.wiphy,
+                                           chandef,
+                                           sdata->wdev.iftype);
+       if (ret < 0)
+               goto out;
+       if (ret > 0)
+               radar_detect_width = BIT(chandef->width);
+
+       sdata->radar_required = ret;
+
+       ret = ieee80211_check_combinations(sdata, chandef, mode,
+                                          radar_detect_width);
+       if (ret < 0)
+               goto out;
+
        __ieee80211_vif_release_channel(sdata);
 
        ctx = ieee80211_find_chanctx(local, chandef, mode);
@@ -548,7 +775,7 @@ int ieee80211_vif_use_channel(struct ieee80211_sub_if_data *sdata,
        ret = ieee80211_assign_vif_chanctx(sdata, ctx);
        if (ret) {
                /* if assign fails refcount stays the same */
-               if (ctx->refcount == 0)
+               if (ieee80211_chanctx_refcount(local, ctx) == 0)
                        ieee80211_free_chanctx(local, ctx);
                goto out;
        }
@@ -560,15 +787,47 @@ int ieee80211_vif_use_channel(struct ieee80211_sub_if_data *sdata,
        return ret;
 }
 
+static int __ieee80211_vif_change_channel(struct ieee80211_sub_if_data *sdata,
+                                         struct ieee80211_chanctx *ctx,
+                                         u32 *changed)
+{
+       struct ieee80211_local *local = sdata->local;
+       const struct cfg80211_chan_def *chandef = &sdata->csa_chandef;
+       u32 chanctx_changed = 0;
+
+       if (!cfg80211_chandef_usable(sdata->local->hw.wiphy, chandef,
+                                    IEEE80211_CHAN_DISABLED))
+               return -EINVAL;
+
+       if (ieee80211_chanctx_refcount(local, ctx) != 1)
+               return -EINVAL;
+
+       if (sdata->vif.bss_conf.chandef.width != chandef->width) {
+               chanctx_changed = IEEE80211_CHANCTX_CHANGE_WIDTH;
+               *changed |= BSS_CHANGED_BANDWIDTH;
+       }
+
+       sdata->vif.bss_conf.chandef = *chandef;
+       ctx->conf.def = *chandef;
+
+       chanctx_changed |= IEEE80211_CHANCTX_CHANGE_CHANNEL;
+       drv_change_chanctx(local, ctx, chanctx_changed);
+
+       ieee80211_recalc_chanctx_chantype(local, ctx);
+       ieee80211_recalc_smps_chanctx(local, ctx);
+       ieee80211_recalc_radar_chanctx(local, ctx);
+       ieee80211_recalc_chanctx_min_def(local, ctx);
+
+       return 0;
+}
+
 int ieee80211_vif_change_channel(struct ieee80211_sub_if_data *sdata,
                                 u32 *changed)
 {
        struct ieee80211_local *local = sdata->local;
        struct ieee80211_chanctx_conf *conf;
        struct ieee80211_chanctx *ctx;
-       const struct cfg80211_chan_def *chandef = &sdata->csa_chandef;
        int ret;
-       u32 chanctx_changed = 0;
 
        lockdep_assert_held(&local->mtx);
 
@@ -576,11 +835,94 @@ int ieee80211_vif_change_channel(struct ieee80211_sub_if_data *sdata,
        if (WARN_ON(!sdata->vif.csa_active))
                return -EINVAL;
 
-       if (!cfg80211_chandef_usable(sdata->local->hw.wiphy, chandef,
-                                    IEEE80211_CHAN_DISABLED))
+       mutex_lock(&local->chanctx_mtx);
+       conf = rcu_dereference_protected(sdata->vif.chanctx_conf,
+                                        lockdep_is_held(&local->chanctx_mtx));
+       if (!conf) {
+               ret = -EINVAL;
+               goto out;
+       }
+
+       ctx = container_of(conf, struct ieee80211_chanctx, conf);
+
+       ret = __ieee80211_vif_change_channel(sdata, ctx, changed);
+ out:
+       mutex_unlock(&local->chanctx_mtx);
+       return ret;
+}
+
+static void
+__ieee80211_vif_copy_chanctx_to_vlans(struct ieee80211_sub_if_data *sdata,
+                                     bool clear)
+{
+       struct ieee80211_local *local = sdata->local;
+       struct ieee80211_sub_if_data *vlan;
+       struct ieee80211_chanctx_conf *conf;
+
+       if (WARN_ON(sdata->vif.type != NL80211_IFTYPE_AP))
+               return;
+
+       lockdep_assert_held(&local->mtx);
+
+       /* Check that conf exists, even when clearing this function
+        * must be called with the AP's channel context still there
+        * as it would otherwise cause VLANs to have an invalid
+        * channel context pointer for a while, possibly pointing
+        * to a channel context that has already been freed.
+        */
+       conf = rcu_dereference_protected(sdata->vif.chanctx_conf,
+                               lockdep_is_held(&local->chanctx_mtx));
+       WARN_ON(!conf);
+
+       if (clear)
+               conf = NULL;
+
+       list_for_each_entry(vlan, &sdata->u.ap.vlans, u.vlan.list)
+               rcu_assign_pointer(vlan->vif.chanctx_conf, conf);
+}
+
+void ieee80211_vif_copy_chanctx_to_vlans(struct ieee80211_sub_if_data *sdata,
+                                        bool clear)
+{
+       struct ieee80211_local *local = sdata->local;
+
+       mutex_lock(&local->chanctx_mtx);
+
+       __ieee80211_vif_copy_chanctx_to_vlans(sdata, clear);
+
+       mutex_unlock(&local->chanctx_mtx);
+}
+
+int ieee80211_vif_unreserve_chanctx(struct ieee80211_sub_if_data *sdata)
+{
+       struct ieee80211_chanctx *ctx = sdata->reserved_chanctx;
+
+       lockdep_assert_held(&sdata->local->chanctx_mtx);
+
+       if (WARN_ON(!ctx))
                return -EINVAL;
 
+       list_del(&sdata->reserved_chanctx_list);
+       sdata->reserved_chanctx = NULL;
+
+       if (ieee80211_chanctx_refcount(sdata->local, ctx) == 0)
+               ieee80211_free_chanctx(sdata->local, ctx);
+
+       return 0;
+}
+
+int ieee80211_vif_reserve_chanctx(struct ieee80211_sub_if_data *sdata,
+                                 const struct cfg80211_chan_def *chandef,
+                                 enum ieee80211_chanctx_mode mode,
+                                 bool radar_required)
+{
+       struct ieee80211_local *local = sdata->local;
+       struct ieee80211_chanctx_conf *conf;
+       struct ieee80211_chanctx *new_ctx, *curr_ctx;
+       int ret = 0;
+
        mutex_lock(&local->chanctx_mtx);
+
        conf = rcu_dereference_protected(sdata->vif.chanctx_conf,
                                         lockdep_is_held(&local->chanctx_mtx));
        if (!conf) {
@@ -588,30 +930,108 @@ int ieee80211_vif_change_channel(struct ieee80211_sub_if_data *sdata,
                goto out;
        }
 
-       ctx = container_of(conf, struct ieee80211_chanctx, conf);
-       if (ctx->refcount != 1) {
+       curr_ctx = container_of(conf, struct ieee80211_chanctx, conf);
+
+       new_ctx = ieee80211_find_reservation_chanctx(local, chandef, mode);
+       if (!new_ctx) {
+               if (ieee80211_chanctx_refcount(local, curr_ctx) == 1 &&
+                   (local->hw.flags & IEEE80211_HW_CHANGE_RUNNING_CHANCTX)) {
+                       /* if we're the only users of the chanctx and
+                        * the driver supports changing a running
+                        * context, reserve our current context
+                        */
+                       new_ctx = curr_ctx;
+               } else if (ieee80211_can_create_new_chanctx(local)) {
+                       /* create a new context and reserve it */
+                       new_ctx = ieee80211_new_chanctx(local, chandef, mode);
+                       if (IS_ERR(new_ctx)) {
+                               ret = PTR_ERR(new_ctx);
+                               goto out;
+                       }
+               } else {
+                       ret = -EBUSY;
+                       goto out;
+               }
+       }
+
+       list_add(&sdata->reserved_chanctx_list, &new_ctx->reserved_vifs);
+       sdata->reserved_chanctx = new_ctx;
+       sdata->reserved_chandef = *chandef;
+       sdata->reserved_radar_required = radar_required;
+out:
+       mutex_unlock(&local->chanctx_mtx);
+       return ret;
+}
+
+int ieee80211_vif_use_reserved_context(struct ieee80211_sub_if_data *sdata,
+                                      u32 *changed)
+{
+       struct ieee80211_local *local = sdata->local;
+       struct ieee80211_chanctx *ctx;
+       struct ieee80211_chanctx *old_ctx;
+       struct ieee80211_chanctx_conf *conf;
+       int ret;
+       u32 tmp_changed = *changed;
+
+       /* TODO: need to recheck if the chandef is usable etc.? */
+
+       lockdep_assert_held(&local->mtx);
+
+       mutex_lock(&local->chanctx_mtx);
+
+       ctx = sdata->reserved_chanctx;
+       if (WARN_ON(!ctx)) {
                ret = -EINVAL;
                goto out;
        }
 
-       if (sdata->vif.bss_conf.chandef.width != chandef->width) {
-               chanctx_changed = IEEE80211_CHANCTX_CHANGE_WIDTH;
-               *changed |= BSS_CHANGED_BANDWIDTH;
+       conf = rcu_dereference_protected(sdata->vif.chanctx_conf,
+                                        lockdep_is_held(&local->chanctx_mtx));
+       if (!conf) {
+               ret = -EINVAL;
+               goto out;
        }
 
-       sdata->vif.bss_conf.chandef = *chandef;
-       ctx->conf.def = *chandef;
+       old_ctx = container_of(conf, struct ieee80211_chanctx, conf);
 
-       chanctx_changed |= IEEE80211_CHANCTX_CHANGE_CHANNEL;
-       drv_change_chanctx(local, ctx, chanctx_changed);
+       if (sdata->vif.bss_conf.chandef.width != sdata->reserved_chandef.width)
+               tmp_changed |= BSS_CHANGED_BANDWIDTH;
+
+       sdata->vif.bss_conf.chandef = sdata->reserved_chandef;
+
+       /* unref our reservation */
+       sdata->reserved_chanctx = NULL;
+       sdata->radar_required = sdata->reserved_radar_required;
+       list_del(&sdata->reserved_chanctx_list);
+
+       if (old_ctx == ctx) {
+               /* This is our own context, just change it */
+               ret = __ieee80211_vif_change_channel(sdata, old_ctx,
+                                                    &tmp_changed);
+               if (ret)
+                       goto out;
+       } else {
+               ret = ieee80211_assign_vif_chanctx(sdata, ctx);
+               if (ieee80211_chanctx_refcount(local, old_ctx) == 0)
+                       ieee80211_free_chanctx(local, old_ctx);
+               if (ret) {
+                       /* if assign fails refcount stays the same */
+                       if (ieee80211_chanctx_refcount(local, ctx) == 0)
+                               ieee80211_free_chanctx(local, ctx);
+                       goto out;
+               }
+
+               if (sdata->vif.type == NL80211_IFTYPE_AP)
+                       __ieee80211_vif_copy_chanctx_to_vlans(sdata, false);
+       }
+
+       *changed = tmp_changed;
 
        ieee80211_recalc_chanctx_chantype(local, ctx);
        ieee80211_recalc_smps_chanctx(local, ctx);
        ieee80211_recalc_radar_chanctx(local, ctx);
        ieee80211_recalc_chanctx_min_def(local, ctx);
-
-       ret = 0;
- out:
+out:
        mutex_unlock(&local->chanctx_mtx);
        return ret;
 }
@@ -695,40 +1115,6 @@ void ieee80211_vif_vlan_copy_chanctx(struct ieee80211_sub_if_data *sdata)
        mutex_unlock(&local->chanctx_mtx);
 }
 
-void ieee80211_vif_copy_chanctx_to_vlans(struct ieee80211_sub_if_data *sdata,
-                                        bool clear)
-{
-       struct ieee80211_local *local = sdata->local;
-       struct ieee80211_sub_if_data *vlan;
-       struct ieee80211_chanctx_conf *conf;
-
-       ASSERT_RTNL();
-
-       if (WARN_ON(sdata->vif.type != NL80211_IFTYPE_AP))
-               return;
-
-       mutex_lock(&local->chanctx_mtx);
-
-       /*
-        * Check that conf exists, even when clearing this function
-        * must be called with the AP's channel context still there
-        * as it would otherwise cause VLANs to have an invalid
-        * channel context pointer for a while, possibly pointing
-        * to a channel context that has already been freed.
-        */
-       conf = rcu_dereference_protected(sdata->vif.chanctx_conf,
-                               lockdep_is_held(&local->chanctx_mtx));
-       WARN_ON(!conf);
-
-       if (clear)
-               conf = NULL;
-
-       list_for_each_entry(vlan, &sdata->u.ap.vlans, u.vlan.list)
-               rcu_assign_pointer(vlan->vif.chanctx_conf, conf);
-
-       mutex_unlock(&local->chanctx_mtx);
-}
-
 void ieee80211_iter_chan_contexts_atomic(
        struct ieee80211_hw *hw,
        void (*iter)(struct ieee80211_hw *hw,
index fa16e54980a1d3e76ce2f85fcb3253eb2599e838..0e963bc1ceac3109f378431a617b853a7166df37 100644 (file)
@@ -128,7 +128,7 @@ static ssize_t sta_tx_latency_stat_write(struct file *file,
        if (!strcmp(buf, TX_LATENCY_DISABLED)) {
                if (!tx_latency)
                        goto unlock;
-               rcu_assign_pointer(local->tx_latency, NULL);
+               RCU_INIT_POINTER(local->tx_latency, NULL);
                synchronize_rcu();
                kfree(tx_latency);
                goto unlock;
index 214ed4ecd739f10ae201e6dfa9112c0dd943f5b8..60c35afee29d551727a2969b25b84998ca5501c4 100644 (file)
@@ -1,6 +1,8 @@
 #ifndef __MAC80211_DEBUGFS_H
 #define __MAC80211_DEBUGFS_H
 
+#include "ieee80211_i.h"
+
 #ifdef CONFIG_MAC80211_DEBUGFS
 void debugfs_hw_add(struct ieee80211_local *local);
 int __printf(4, 5) mac80211_format_buffer(char __user *userbuf, size_t count,
index 79025e79f4d6459dd99de5ad496e351e123f53b7..9f5501a9a79506266decdb83d0ad78c9d6e9bdf9 100644 (file)
@@ -3,6 +3,8 @@
 #ifndef __IEEE80211_DEBUGFS_NETDEV_H
 #define __IEEE80211_DEBUGFS_NETDEV_H
 
+#include "ieee80211_i.h"
+
 #ifdef CONFIG_MAC80211_DEBUGFS
 void ieee80211_debugfs_add_netdev(struct ieee80211_sub_if_data *sdata);
 void ieee80211_debugfs_remove_netdev(struct ieee80211_sub_if_data *sdata);
index fc689f5d971e259381f0a26e13079f1a727fe704..5331582a2c817184db6f72ee9e2aa6f6841a10f0 100644 (file)
@@ -726,13 +726,19 @@ static inline void drv_rfkill_poll(struct ieee80211_local *local)
 }
 
 static inline void drv_flush(struct ieee80211_local *local,
+                            struct ieee80211_sub_if_data *sdata,
                             u32 queues, bool drop)
 {
+       struct ieee80211_vif *vif = sdata ? &sdata->vif : NULL;
+
        might_sleep();
 
+       if (sdata)
+               check_sdata_in_driver(sdata);
+
        trace_drv_flush(local, queues, drop);
        if (local->ops->flush)
-               local->ops->flush(&local->hw, queues, drop);
+               local->ops->flush(&local->hw, vif, queues, drop);
        trace_drv_return_void(local);
 }
 
index c150b68436d78ada5bfbb0825d128d8e89f916e3..15702ff64a4c89fb89541f620b9da7dcccc2a7bd 100644 (file)
@@ -31,6 +31,18 @@ static void __check_htcap_disable(struct ieee80211_ht_cap *ht_capa,
        }
 }
 
+static void __check_htcap_enable(struct ieee80211_ht_cap *ht_capa,
+                                 struct ieee80211_ht_cap *ht_capa_mask,
+                                 struct ieee80211_sta_ht_cap *ht_cap,
+                                 u16 flag)
+{
+       __le16 le_flag = cpu_to_le16(flag);
+
+       if ((ht_capa_mask->cap_info & le_flag) &&
+           (ht_capa->cap_info & le_flag))
+               ht_cap->cap |= flag;
+}
+
 void ieee80211_apply_htcap_overrides(struct ieee80211_sub_if_data *sdata,
                                     struct ieee80211_sta_ht_cap *ht_cap)
 {
@@ -59,7 +71,7 @@ void ieee80211_apply_htcap_overrides(struct ieee80211_sub_if_data *sdata,
        smask = (u8 *)(&ht_capa_mask->mcs.rx_mask);
 
        /* NOTE:  If you add more over-rides here, update register_hw
-        * ht_capa_mod_msk logic in main.c as well.
+        * ht_capa_mod_mask logic in main.c as well.
         * And, if this method can ever change ht_cap.ht_supported, fix
         * the check in ieee80211_add_ht_ie.
         */
@@ -86,6 +98,14 @@ void ieee80211_apply_htcap_overrides(struct ieee80211_sub_if_data *sdata,
        __check_htcap_disable(ht_capa, ht_capa_mask, ht_cap,
                              IEEE80211_HT_CAP_MAX_AMSDU);
 
+       /* Allow user to disable LDPC */
+       __check_htcap_disable(ht_capa, ht_capa_mask, ht_cap,
+                             IEEE80211_HT_CAP_LDPC_CODING);
+
+       /* Allow user to enable 40 MHz intolerant bit. */
+       __check_htcap_enable(ht_capa, ht_capa_mask, ht_cap,
+                            IEEE80211_HT_CAP_40MHZ_INTOLERANT);
+
        /* Allow user to decrease AMPDU factor */
        if (ht_capa_mask->ampdu_params_info &
            IEEE80211_HT_AMPDU_PARM_FACTOR) {
index 06d28787945b513e6672457a1e6990da0fd644d8..ff4d4155a84d26f982d820858dddf206a49dbba7 100644 (file)
@@ -228,7 +228,7 @@ static void __ieee80211_sta_join_ibss(struct ieee80211_sub_if_data *sdata,
        struct beacon_data *presp;
        enum nl80211_bss_scan_width scan_width;
        bool have_higher_than_11mbit;
-       bool radar_required = false;
+       bool radar_required;
        int err;
 
        sdata_assert_lock(sdata);
@@ -253,7 +253,7 @@ static void __ieee80211_sta_join_ibss(struct ieee80211_sub_if_data *sdata,
 
        presp = rcu_dereference_protected(ifibss->presp,
                                          lockdep_is_held(&sdata->wdev.mtx));
-       rcu_assign_pointer(ifibss->presp, NULL);
+       RCU_INIT_POINTER(ifibss->presp, NULL);
        if (presp)
                kfree_rcu(presp, rcu_head);
 
@@ -262,7 +262,8 @@ static void __ieee80211_sta_join_ibss(struct ieee80211_sub_if_data *sdata,
        /* make a copy of the chandef, it could be modified below. */
        chandef = *req_chandef;
        chan = chandef.chan;
-       if (!cfg80211_reg_can_beacon(local->hw.wiphy, &chandef)) {
+       if (!cfg80211_reg_can_beacon(local->hw.wiphy, &chandef,
+                                    NL80211_IFTYPE_ADHOC)) {
                if (chandef.width == NL80211_CHAN_WIDTH_5 ||
                    chandef.width == NL80211_CHAN_WIDTH_10 ||
                    chandef.width == NL80211_CHAN_WIDTH_20_NOHT ||
@@ -274,7 +275,8 @@ static void __ieee80211_sta_join_ibss(struct ieee80211_sub_if_data *sdata,
                chandef.width = NL80211_CHAN_WIDTH_20;
                chandef.center_freq1 = chan->center_freq;
                /* check again for downgraded chandef */
-               if (!cfg80211_reg_can_beacon(local->hw.wiphy, &chandef)) {
+               if (!cfg80211_reg_can_beacon(local->hw.wiphy, &chandef,
+                                            NL80211_IFTYPE_ADHOC)) {
                        sdata_info(sdata,
                                   "Failed to join IBSS, beacons forbidden\n");
                        return;
@@ -282,21 +284,20 @@ static void __ieee80211_sta_join_ibss(struct ieee80211_sub_if_data *sdata,
        }
 
        err = cfg80211_chandef_dfs_required(sdata->local->hw.wiphy,
-                                           &chandef);
+                                           &chandef, NL80211_IFTYPE_ADHOC);
        if (err < 0) {
                sdata_info(sdata,
                           "Failed to join IBSS, invalid chandef\n");
                return;
        }
-       if (err > 0) {
-               if (!ifibss->userspace_handles_dfs) {
-                       sdata_info(sdata,
-                                  "Failed to join IBSS, DFS channel without control program\n");
-                       return;
-               }
-               radar_required = true;
+       if (err > 0 && !ifibss->userspace_handles_dfs) {
+               sdata_info(sdata,
+                          "Failed to join IBSS, DFS channel without control program\n");
+               return;
        }
 
+       radar_required = err;
+
        mutex_lock(&local->mtx);
        if (ieee80211_vif_use_channel(sdata, &chandef,
                                      ifibss->fixed_channel ?
@@ -775,7 +776,8 @@ static void ieee80211_ibss_csa_mark_radar(struct ieee80211_sub_if_data *sdata)
         * unavailable.
         */
        err = cfg80211_chandef_dfs_required(sdata->local->hw.wiphy,
-                                           &ifibss->chandef);
+                                           &ifibss->chandef,
+                                           NL80211_IFTYPE_ADHOC);
        if (err > 0)
                cfg80211_radar_event(sdata->local->hw.wiphy, &ifibss->chandef,
                                     GFP_ATOMIC);
@@ -861,7 +863,8 @@ ieee80211_ibss_process_chanswitch(struct ieee80211_sub_if_data *sdata,
                goto disconnect;
        }
 
-       if (!cfg80211_reg_can_beacon(sdata->local->hw.wiphy, &params.chandef)) {
+       if (!cfg80211_reg_can_beacon(sdata->local->hw.wiphy, &params.chandef,
+                                    NL80211_IFTYPE_ADHOC)) {
                sdata_info(sdata,
                           "IBSS %pM switches to unsupported channel (%d MHz, width:%d, CF1/2: %d/%d MHz), disconnecting\n",
                           ifibss->bssid,
@@ -873,17 +876,17 @@ ieee80211_ibss_process_chanswitch(struct ieee80211_sub_if_data *sdata,
        }
 
        err = cfg80211_chandef_dfs_required(sdata->local->hw.wiphy,
-                                           &params.chandef);
+                                           &params.chandef,
+                                           NL80211_IFTYPE_ADHOC);
        if (err < 0)
                goto disconnect;
-       if (err) {
+       if (err > 0 && !ifibss->userspace_handles_dfs) {
                /* IBSS-DFS only allowed with a control program */
-               if (!ifibss->userspace_handles_dfs)
-                       goto disconnect;
-
-               params.radar_required = true;
+               goto disconnect;
        }
 
+       params.radar_required = err;
+
        if (cfg80211_chandef_identical(&params.chandef,
                                       &sdata->vif.bss_conf.chandef)) {
                ibss_dbg(sdata,
@@ -1636,7 +1639,33 @@ int ieee80211_ibss_join(struct ieee80211_sub_if_data *sdata,
        u32 changed = 0;
        u32 rate_flags;
        struct ieee80211_supported_band *sband;
+       enum ieee80211_chanctx_mode chanmode;
+       struct ieee80211_local *local = sdata->local;
+       int radar_detect_width = 0;
        int i;
+       int ret;
+
+       ret = cfg80211_chandef_dfs_required(local->hw.wiphy,
+                                           &params->chandef,
+                                           sdata->wdev.iftype);
+       if (ret < 0)
+               return ret;
+
+       if (ret > 0) {
+               if (!params->userspace_handles_dfs)
+                       return -EINVAL;
+               radar_detect_width = BIT(params->chandef.width);
+       }
+
+       chanmode = (params->channel_fixed && !ret) ?
+               IEEE80211_CHANCTX_SHARED : IEEE80211_CHANCTX_EXCLUSIVE;
+
+       mutex_lock(&local->chanctx_mtx);
+       ret = ieee80211_check_combinations(sdata, &params->chandef, chanmode,
+                                          radar_detect_width);
+       mutex_unlock(&local->chanctx_mtx);
+       if (ret < 0)
+               return ret;
 
        if (params->bssid) {
                memcpy(sdata->u.ibss.bssid, params->bssid, ETH_ALEN);
@@ -1651,7 +1680,7 @@ int ieee80211_ibss_join(struct ieee80211_sub_if_data *sdata,
 
        /* fix basic_rates if channel does not support these rates */
        rate_flags = ieee80211_chandef_rate_flags(&params->chandef);
-       sband = sdata->local->hw.wiphy->bands[params->chandef.chan->band];
+       sband = local->hw.wiphy->bands[params->chandef.chan->band];
        for (i = 0; i < sband->n_bitrates; i++) {
                if ((rate_flags & sband->bitrates[i].flags) != rate_flags)
                        sdata->u.ibss.basic_rates &= ~BIT(i);
@@ -1700,9 +1729,9 @@ int ieee80211_ibss_join(struct ieee80211_sub_if_data *sdata,
        ieee80211_bss_info_change_notify(sdata, changed);
 
        sdata->smps_mode = IEEE80211_SMPS_OFF;
-       sdata->needed_rx_chains = sdata->local->rx_chains;
+       sdata->needed_rx_chains = local->rx_chains;
 
-       ieee80211_queue_work(&sdata->local->hw, &sdata->work);
+       ieee80211_queue_work(&local->hw, &sdata->work);
 
        return 0;
 }
index 222c28b75315f1ab43226e08566a5f911c6bacc7..b455f62d357ad6675bad1eda38c9019b6a7539c9 100644 (file)
@@ -260,7 +260,7 @@ struct ieee80211_if_ap {
 
        /* to be used after channel switch. */
        struct cfg80211_beacon_data *next_beacon;
-       struct list_head vlans;
+       struct list_head vlans; /* write-protected with RTNL and local->mtx */
 
        struct ps_data ps;
        atomic_t num_mcast_sta; /* number of stations receiving multicast */
@@ -276,7 +276,7 @@ struct ieee80211_if_wds {
 };
 
 struct ieee80211_if_vlan {
-       struct list_head list;
+       struct list_head list; /* write-protected with RTNL and local->mtx */
 
        /* used for all tx if the VLAN is configured to 4-addr mode */
        struct sta_info __rcu *sta;
@@ -691,8 +691,10 @@ struct ieee80211_chanctx {
        struct list_head list;
        struct rcu_head rcu_head;
 
+       struct list_head assigned_vifs;
+       struct list_head reserved_vifs;
+
        enum ieee80211_chanctx_mode mode;
-       int refcount;
        bool driver_present;
 
        struct ieee80211_chanctx_conf conf;
@@ -756,6 +758,14 @@ struct ieee80211_sub_if_data {
        bool csa_radar_required;
        struct cfg80211_chan_def csa_chandef;
 
+       struct list_head assigned_chanctx_list; /* protected by chanctx_mtx */
+       struct list_head reserved_chanctx_list; /* protected by chanctx_mtx */
+
+       /* context reservation -- protected with chanctx_mtx */
+       struct ieee80211_chanctx *reserved_chanctx;
+       struct cfg80211_chan_def reserved_chandef;
+       bool reserved_radar_required;
+
        /* used to reconfigure hardware SM PS */
        struct work_struct recalc_smps;
 
@@ -1770,6 +1780,16 @@ int __must_check
 ieee80211_vif_use_channel(struct ieee80211_sub_if_data *sdata,
                          const struct cfg80211_chan_def *chandef,
                          enum ieee80211_chanctx_mode mode);
+int __must_check
+ieee80211_vif_reserve_chanctx(struct ieee80211_sub_if_data *sdata,
+                             const struct cfg80211_chan_def *chandef,
+                             enum ieee80211_chanctx_mode mode,
+                             bool radar_required);
+int __must_check
+ieee80211_vif_use_reserved_context(struct ieee80211_sub_if_data *sdata,
+                                  u32 *changed);
+int ieee80211_vif_unreserve_chanctx(struct ieee80211_sub_if_data *sdata);
+
 int __must_check
 ieee80211_vif_change_bandwidth(struct ieee80211_sub_if_data *sdata,
                               const struct cfg80211_chan_def *chandef,
@@ -1782,6 +1802,8 @@ void ieee80211_vif_release_channel(struct ieee80211_sub_if_data *sdata);
 void ieee80211_vif_vlan_copy_chanctx(struct ieee80211_sub_if_data *sdata);
 void ieee80211_vif_copy_chanctx_to_vlans(struct ieee80211_sub_if_data *sdata,
                                         bool clear);
+int ieee80211_chanctx_refcount(struct ieee80211_local *local,
+                              struct ieee80211_chanctx *ctx);
 
 void ieee80211_recalc_smps_chanctx(struct ieee80211_local *local,
                                   struct ieee80211_chanctx *chanctx);
@@ -1805,6 +1827,11 @@ int ieee80211_cs_headroom(struct ieee80211_local *local,
                          enum nl80211_iftype iftype);
 void ieee80211_recalc_dtim(struct ieee80211_local *local,
                           struct ieee80211_sub_if_data *sdata);
+int ieee80211_check_combinations(struct ieee80211_sub_if_data *sdata,
+                                const struct cfg80211_chan_def *chandef,
+                                enum ieee80211_chanctx_mode chanmode,
+                                u8 radar_detect);
+int ieee80211_max_num_channels(struct ieee80211_local *local);
 
 #ifdef CONFIG_MAC80211_NOINLINE
 #define debug_noinline noinline
index b8d331e7d883d50fd4fc3adf12c2869ac3beedb7..7fff3dcaac43fb93034519a3f1b8b1aaae0579bc 100644 (file)
@@ -250,6 +250,7 @@ static int ieee80211_check_concurrent_iface(struct ieee80211_sub_if_data *sdata,
 {
        struct ieee80211_local *local = sdata->local;
        struct ieee80211_sub_if_data *nsdata;
+       int ret;
 
        ASSERT_RTNL();
 
@@ -300,7 +301,10 @@ static int ieee80211_check_concurrent_iface(struct ieee80211_sub_if_data *sdata,
                }
        }
 
-       return 0;
+       mutex_lock(&local->chanctx_mtx);
+       ret = ieee80211_check_combinations(sdata, NULL, 0, 0);
+       mutex_unlock(&local->chanctx_mtx);
+       return ret;
 }
 
 static int ieee80211_check_queues(struct ieee80211_sub_if_data *sdata,
@@ -423,7 +427,7 @@ int ieee80211_add_virtual_monitor(struct ieee80211_local *local)
        mutex_unlock(&local->mtx);
        if (ret) {
                mutex_lock(&local->iflist_mtx);
-               rcu_assign_pointer(local->monitor_sdata, NULL);
+               RCU_INIT_POINTER(local->monitor_sdata, NULL);
                mutex_unlock(&local->iflist_mtx);
                synchronize_net();
                drv_remove_interface(local, sdata);
@@ -452,7 +456,7 @@ void ieee80211_del_virtual_monitor(struct ieee80211_local *local)
                return;
        }
 
-       rcu_assign_pointer(local->monitor_sdata, NULL);
+       RCU_INIT_POINTER(local->monitor_sdata, NULL);
        mutex_unlock(&local->iflist_mtx);
 
        synchronize_net();
@@ -492,7 +496,9 @@ int ieee80211_do_open(struct wireless_dev *wdev, bool coming_up)
                if (!sdata->bss)
                        return -ENOLINK;
 
+               mutex_lock(&local->mtx);
                list_add(&sdata->u.vlan.list, &sdata->bss->vlans);
+               mutex_unlock(&local->mtx);
 
                master = container_of(sdata->bss,
                                      struct ieee80211_sub_if_data, u.ap);
@@ -722,8 +728,11 @@ int ieee80211_do_open(struct wireless_dev *wdev, bool coming_up)
                drv_stop(local);
  err_del_bss:
        sdata->bss = NULL;
-       if (sdata->vif.type == NL80211_IFTYPE_AP_VLAN)
+       if (sdata->vif.type == NL80211_IFTYPE_AP_VLAN) {
+               mutex_lock(&local->mtx);
                list_del(&sdata->u.vlan.list);
+               mutex_unlock(&local->mtx);
+       }
        /* might already be clear but that doesn't matter */
        clear_bit(SDATA_STATE_RUNNING, &sdata->state);
        return res;
@@ -875,8 +884,10 @@ static void ieee80211_do_stop(struct ieee80211_sub_if_data *sdata,
 
        switch (sdata->vif.type) {
        case NL80211_IFTYPE_AP_VLAN:
+               mutex_lock(&local->mtx);
                list_del(&sdata->u.vlan.list);
-               rcu_assign_pointer(sdata->vif.chanctx_conf, NULL);
+               mutex_unlock(&local->mtx);
+               RCU_INIT_POINTER(sdata->vif.chanctx_conf, NULL);
                /* no need to tell driver */
                break;
        case NL80211_IFTYPE_MONITOR:
@@ -895,7 +906,7 @@ static void ieee80211_do_stop(struct ieee80211_sub_if_data *sdata,
                break;
        case NL80211_IFTYPE_P2P_DEVICE:
                /* relies on synchronize_rcu() below */
-               rcu_assign_pointer(local->p2p_sdata, NULL);
+               RCU_INIT_POINTER(local->p2p_sdata, NULL);
                /* fall through */
        default:
                cancel_work_sync(&sdata->work);
@@ -1280,6 +1291,8 @@ static void ieee80211_setup_sdata(struct ieee80211_sub_if_data *sdata,
        INIT_WORK(&sdata->work, ieee80211_iface_work);
        INIT_WORK(&sdata->recalc_smps, ieee80211_recalc_smps_work);
        INIT_WORK(&sdata->csa_finalize_work, ieee80211_csa_finalize_work);
+       INIT_LIST_HEAD(&sdata->assigned_chanctx_list);
+       INIT_LIST_HEAD(&sdata->reserved_chanctx_list);
 
        switch (type) {
        case NL80211_IFTYPE_P2P_GO:
@@ -1774,20 +1787,19 @@ static int netdev_notify(struct notifier_block *nb,
        struct ieee80211_sub_if_data *sdata;
 
        if (state != NETDEV_CHANGENAME)
-               return 0;
+               return NOTIFY_DONE;
 
        if (!dev->ieee80211_ptr || !dev->ieee80211_ptr->wiphy)
-               return 0;
+               return NOTIFY_DONE;
 
        if (dev->ieee80211_ptr->wiphy->privid != mac80211_wiphy_privid)
-               return 0;
+               return NOTIFY_DONE;
 
        sdata = IEEE80211_DEV_TO_SUB_IF(dev);
-
        memcpy(sdata->name, dev->name, IFNAMSIZ);
-
        ieee80211_debugfs_rename_netdev(sdata);
-       return 0;
+
+       return NOTIFY_OK;
 }
 
 static struct notifier_block mac80211_netdev_notifier = {
index 4c1bf61bc778683dc352ebb32a585d45649168a6..27b9364cdf177d8f28e410bf50eff708bc40e424 100644 (file)
@@ -340,7 +340,7 @@ static int ieee80211_ifa_changed(struct notifier_block *nb,
 
        sdata_unlock(sdata);
 
-       return NOTIFY_DONE;
+       return NOTIFY_OK;
 }
 #endif
 
@@ -371,7 +371,7 @@ static int ieee80211_ifa6_changed(struct notifier_block *nb,
 
        drv_ipv6_addr_change(local, sdata, idev);
 
-       return NOTIFY_DONE;
+       return NOTIFY_OK;
 }
 #endif
 
@@ -446,7 +446,9 @@ static const struct ieee80211_ht_cap mac80211_ht_capa_mod_mask = {
        .cap_info = cpu_to_le16(IEEE80211_HT_CAP_SUP_WIDTH_20_40 |
                                IEEE80211_HT_CAP_MAX_AMSDU |
                                IEEE80211_HT_CAP_SGI_20 |
-                               IEEE80211_HT_CAP_SGI_40),
+                               IEEE80211_HT_CAP_SGI_40 |
+                               IEEE80211_HT_CAP_LDPC_CODING |
+                               IEEE80211_HT_CAP_40MHZ_INTOLERANT),
        .mcs = {
                .rx_mask = { 0xff, 0xff, 0xff, 0xff, 0xff,
                             0xff, 0xff, 0xff, 0xff, 0xff, },
index f70e9cd10552dac6729d703edd2e9ae12750a3a6..b06ddc9519ce07a4464a6a8460108031abc2c4a9 100644 (file)
@@ -366,20 +366,15 @@ int mesh_add_rsn_ie(struct ieee80211_sub_if_data *sdata, struct sk_buff *skb)
                return 0;
 
        /* find RSN IE */
-       data = ifmsh->ie;
-       while (data < ifmsh->ie + ifmsh->ie_len) {
-               if (*data == WLAN_EID_RSN) {
-                       len = data[1] + 2;
-                       break;
-               }
-               data++;
-       }
+       data = cfg80211_find_ie(WLAN_EID_RSN, ifmsh->ie, ifmsh->ie_len);
+       if (!data)
+               return 0;
 
-       if (len) {
-               if (skb_tailroom(skb) < len)
-                       return -ENOMEM;
-               memcpy(skb_put(skb, len), data, len);
-       }
+       len = data[1] + 2;
+
+       if (skb_tailroom(skb) < len)
+               return -ENOMEM;
+       memcpy(skb_put(skb, len), data, len);
 
        return 0;
 }
@@ -829,7 +824,7 @@ void ieee80211_stop_mesh(struct ieee80211_sub_if_data *sdata)
        ieee80211_bss_info_change_notify(sdata, BSS_CHANGED_BEACON_ENABLED);
        bcn = rcu_dereference_protected(ifmsh->beacon,
                                        lockdep_is_held(&sdata->wdev.mtx));
-       rcu_assign_pointer(ifmsh->beacon, NULL);
+       RCU_INIT_POINTER(ifmsh->beacon, NULL);
        kfree_rcu(bcn, rcu_head);
 
        /* flush STAs and mpaths on this iface */
@@ -903,14 +898,15 @@ ieee80211_mesh_process_chnswitch(struct ieee80211_sub_if_data *sdata,
        }
 
        err = cfg80211_chandef_dfs_required(sdata->local->hw.wiphy,
-                                           &params.chandef);
+                                           &params.chandef,
+                                           NL80211_IFTYPE_MESH_POINT);
        if (err < 0)
                return false;
-       if (err) {
-               params.radar_required = true;
+       if (err > 0)
                /* TODO: DFS not (yet) supported */
                return false;
-       }
+
+       params.radar_required = err;
 
        if (cfg80211_chandef_identical(&params.chandef,
                                       &sdata->vif.bss_conf.chandef)) {
@@ -1068,7 +1064,7 @@ int ieee80211_mesh_finish_csa(struct ieee80211_sub_if_data *sdata)
 
        /* Remove the CSA and MCSP elements from the beacon */
        tmp_csa_settings = rcu_dereference(ifmsh->csa);
-       rcu_assign_pointer(ifmsh->csa, NULL);
+       RCU_INIT_POINTER(ifmsh->csa, NULL);
        if (tmp_csa_settings)
                kfree_rcu(tmp_csa_settings, rcu_head);
        ret = ieee80211_mesh_rebuild_beacon(sdata);
@@ -1102,7 +1098,7 @@ int ieee80211_mesh_csa_beacon(struct ieee80211_sub_if_data *sdata,
        ret = ieee80211_mesh_rebuild_beacon(sdata);
        if (ret) {
                tmp_csa_settings = rcu_dereference(ifmsh->csa);
-               rcu_assign_pointer(ifmsh->csa, NULL);
+               RCU_INIT_POINTER(ifmsh->csa, NULL);
                kfree_rcu(tmp_csa_settings, rcu_head);
                return ret;
        }
index f9514685d45a54802cb0f21dce0953ccbe9b78f4..94758b9c9ed48a5d1ea9921f4f9f0da40e34bd0b 100644 (file)
@@ -37,7 +37,7 @@ static inline u32 u32_field_get(const u8 *preq_elem, int offset, bool ae)
        return get_unaligned_le32(preq_elem + offset);
 }
 
-static inline u32 u16_field_get(const u8 *preq_elem, int offset, bool ae)
+static inline u16 u16_field_get(const u8 *preq_elem, int offset, bool ae)
 {
        if (ae)
                offset += 6;
@@ -544,9 +544,10 @@ static void hwmp_preq_frame_process(struct ieee80211_sub_if_data *sdata,
                if (time_after(jiffies, ifmsh->last_sn_update +
                                        net_traversal_jiffies(sdata)) ||
                    time_before(jiffies, ifmsh->last_sn_update)) {
-                       target_sn = ++ifmsh->sn;
+                       ++ifmsh->sn;
                        ifmsh->last_sn_update = jiffies;
                }
+               target_sn = ifmsh->sn;
        } else if (is_broadcast_ether_addr(target_addr) &&
                   (target_flags & IEEE80211_PREQ_TO_FLAG)) {
                rcu_read_lock();
index 3b848dad958762ccfc48732623d9ddf1f56aa8ae..0e4886f881f1e49f438fa0f61d5c2b021877bf30 100644 (file)
@@ -11,6 +11,7 @@
 #define MICHAEL_H
 
 #include <linux/types.h>
+#include <linux/ieee80211.h>
 
 #define MICHAEL_MIC_LEN 8
 
index dee50aefd6e868e247ba869e9e9883d4640330e3..488826f188a7a8a7f903fea8d9249ac9819b6c7c 100644 (file)
@@ -1089,7 +1089,7 @@ ieee80211_sta_process_chanswitch(struct ieee80211_sub_if_data *sdata,
        }
        chanctx = container_of(rcu_access_pointer(sdata->vif.chanctx_conf),
                               struct ieee80211_chanctx, conf);
-       if (chanctx->refcount > 1) {
+       if (ieee80211_chanctx_refcount(local, chanctx) > 1) {
                sdata_info(sdata,
                           "channel switch with multiple interfaces on the same channel, disconnecting\n");
                ieee80211_queue_work(&local->hw,
@@ -3701,7 +3701,7 @@ int ieee80211_max_network_latency(struct notifier_block *nb,
        ieee80211_recalc_ps(local, latency_usec);
        mutex_unlock(&local->iflist_mtx);
 
-       return 0;
+       return NOTIFY_OK;
 }
 
 static u8 ieee80211_ht_vht_rx_chains(struct ieee80211_sub_if_data *sdata,
index 216c45b949e513382447050eb560098a5edaa4b3..394e201cde6d3b6d4375f973937df55395547fea 100644 (file)
@@ -54,24 +54,25 @@ static struct sk_buff *remove_monitor_info(struct ieee80211_local *local,
        return skb;
 }
 
-static inline int should_drop_frame(struct sk_buff *skb, int present_fcs_len)
+static inline bool should_drop_frame(struct sk_buff *skb, int present_fcs_len)
 {
        struct ieee80211_rx_status *status = IEEE80211_SKB_RXCB(skb);
-       struct ieee80211_hdr *hdr;
-
-       hdr = (void *)(skb->data);
+       struct ieee80211_hdr *hdr = (void *)skb->data;
 
        if (status->flag & (RX_FLAG_FAILED_FCS_CRC |
                            RX_FLAG_FAILED_PLCP_CRC |
                            RX_FLAG_AMPDU_IS_ZEROLEN))
-               return 1;
+               return true;
+
        if (unlikely(skb->len < 16 + present_fcs_len))
-               return 1;
+               return true;
+
        if (ieee80211_is_ctl(hdr->frame_control) &&
            !ieee80211_is_pspoll(hdr->frame_control) &&
            !ieee80211_is_back_req(hdr->frame_control))
-               return 1;
-       return 0;
+               return true;
+
+       return false;
 }
 
 static int
@@ -1231,7 +1232,8 @@ ieee80211_rx_h_sta_process(struct ieee80211_rx_data *rx)
                if (ether_addr_equal(bssid, rx->sdata->u.ibss.bssid) &&
                    test_sta_flag(sta, WLAN_STA_AUTHORIZED)) {
                        sta->last_rx = jiffies;
-                       if (ieee80211_is_data(hdr->frame_control)) {
+                       if (ieee80211_is_data(hdr->frame_control) &&
+                           !is_multicast_ether_addr(hdr->addr1)) {
                                sta->last_rx_rate_idx = status->rate_idx;
                                sta->last_rx_rate_flag = status->flag;
                                sta->last_rx_rate_vht_flag = status->vht_flag;
@@ -3190,7 +3192,7 @@ static bool ieee80211_prepare_and_rx_handle(struct ieee80211_rx_data *rx,
 }
 
 /*
- * This is the actual Rx frames handler. as it blongs to Rx path it must
+ * This is the actual Rx frames handler. as it belongs to Rx path it must
  * be called with rcu_read_lock protection.
  */
 static void __ieee80211_rx_handle_packet(struct ieee80211_hw *hw,
index 3ce7f2c8539a1f626f7488833ee966fb3af5d502..28185c8dc19a302b31af1e0efe11dbaea8000b2f 100644 (file)
@@ -309,7 +309,7 @@ static void __ieee80211_scan_completed(struct ieee80211_hw *hw, bool aborted)
        if (local->scan_req != local->int_scan_req)
                cfg80211_scan_done(local->scan_req, aborted);
        local->scan_req = NULL;
-       rcu_assign_pointer(local->scan_sdata, NULL);
+       RCU_INIT_POINTER(local->scan_sdata, NULL);
 
        local->scanning = 0;
        local->scan_chandef.chan = NULL;
@@ -559,7 +559,7 @@ static int __ieee80211_start_scan(struct ieee80211_sub_if_data *sdata,
                ieee80211_recalc_idle(local);
 
                local->scan_req = NULL;
-               rcu_assign_pointer(local->scan_sdata, NULL);
+               RCU_INIT_POINTER(local->scan_sdata, NULL);
        }
 
        return rc;
@@ -773,7 +773,7 @@ void ieee80211_scan_work(struct work_struct *work)
                int rc;
 
                local->scan_req = NULL;
-               rcu_assign_pointer(local->scan_sdata, NULL);
+               RCU_INIT_POINTER(local->scan_sdata, NULL);
 
                rc = __ieee80211_start_scan(sdata, req);
                if (rc) {
@@ -1014,7 +1014,7 @@ out_free:
 
        if (ret) {
                /* Clean in case of failure after HW restart or upon resume. */
-               rcu_assign_pointer(local->sched_scan_sdata, NULL);
+               RCU_INIT_POINTER(local->sched_scan_sdata, NULL);
                local->sched_scan_req = NULL;
        }
 
@@ -1089,7 +1089,7 @@ void ieee80211_sched_scan_stopped_work(struct work_struct *work)
                return;
        }
 
-       rcu_assign_pointer(local->sched_scan_sdata, NULL);
+       RCU_INIT_POINTER(local->sched_scan_sdata, NULL);
 
        /* If sched scan was aborted by the driver. */
        local->sched_scan_req = NULL;
index 137a192e64bc3c2aa61cc9c5912a89bd3008cbe3..632d372bb5117fa464dff4d5cb587f56a226647a 100644 (file)
@@ -552,7 +552,7 @@ static int sta_info_insert_finish(struct sta_info *sta) __acquires(RCU)
 int sta_info_insert_rcu(struct sta_info *sta) __acquires(RCU)
 {
        struct ieee80211_local *local = sta->local;
-       int err = 0;
+       int err;
 
        might_sleep();
 
@@ -570,7 +570,6 @@ int sta_info_insert_rcu(struct sta_info *sta) __acquires(RCU)
 
        return 0;
  out_free:
-       BUG_ON(!err);
        sta_info_free(local, sta);
        return err;
 }
@@ -1148,7 +1147,8 @@ void ieee80211_sta_ps_deliver_wakeup(struct sta_info *sta)
        atomic_dec(&ps->num_sta_ps);
 
        /* This station just woke up and isn't aware of our SMPS state */
-       if (!ieee80211_smps_is_restrictive(sta->known_smps_mode,
+       if (!ieee80211_vif_is_mesh(&sdata->vif) &&
+           !ieee80211_smps_is_restrictive(sta->known_smps_mode,
                                           sdata->smps_mode) &&
            sta->known_smps_mode != sdata->bss->req_smps &&
            sta_info_tx_streams(sta) != 1) {
index 00ba90b02ab2ab79c01d58dc5ba25785993f8dd6..60cb7a665976e10e7a909a9545b7643cf34e67a4 100644 (file)
@@ -314,10 +314,9 @@ ieee80211_add_tx_radiotap_header(struct ieee80211_local *local,
            !is_multicast_ether_addr(hdr->addr1))
                txflags |= IEEE80211_RADIOTAP_F_TX_FAIL;
 
-       if ((info->status.rates[0].flags & IEEE80211_TX_RC_USE_RTS_CTS) ||
-           (info->status.rates[0].flags & IEEE80211_TX_RC_USE_CTS_PROTECT))
+       if (info->status.rates[0].flags & IEEE80211_TX_RC_USE_CTS_PROTECT)
                txflags |= IEEE80211_RADIOTAP_F_TX_CTS;
-       else if (info->status.rates[0].flags & IEEE80211_TX_RC_USE_RTS_CTS)
+       if (info->status.rates[0].flags & IEEE80211_TX_RC_USE_RTS_CTS)
                txflags |= IEEE80211_RADIOTAP_F_TX_RTS;
 
        put_unaligned_le16(txflags, pos);
index 275c94f995f7c8401749cbbafb249bb52a418be7..c08bd4aca6bb8b5c1e1e0c57abd356081610f711 100644 (file)
@@ -554,7 +554,7 @@ void ieee80211_flush_queues(struct ieee80211_local *local,
        ieee80211_stop_queues_by_reason(&local->hw, IEEE80211_MAX_QUEUE_MAP,
                                        IEEE80211_QUEUE_STOP_REASON_FLUSH);
 
-       drv_flush(local, queues, false);
+       drv_flush(local, sdata, queues, false);
 
        ieee80211_wake_queues_by_reason(&local->hw, IEEE80211_MAX_QUEUE_MAP,
                                        IEEE80211_QUEUE_STOP_REASON_FLUSH);
@@ -1546,7 +1546,7 @@ int ieee80211_reconfig(struct ieee80211_local *local)
                WARN_ON(local->resuming);
                res = drv_add_interface(local, sdata);
                if (WARN_ON(res)) {
-                       rcu_assign_pointer(local->monitor_sdata, NULL);
+                       RCU_INIT_POINTER(local->monitor_sdata, NULL);
                        synchronize_net();
                        kfree(sdata);
                }
@@ -1565,17 +1565,17 @@ int ieee80211_reconfig(struct ieee80211_local *local)
                list_for_each_entry(ctx, &local->chanctx_list, list)
                        WARN_ON(drv_add_chanctx(local, ctx));
                mutex_unlock(&local->chanctx_mtx);
-       }
 
-       list_for_each_entry(sdata, &local->interfaces, list) {
-               if (!ieee80211_sdata_running(sdata))
-                       continue;
-               ieee80211_assign_chanctx(local, sdata);
-       }
+               list_for_each_entry(sdata, &local->interfaces, list) {
+                       if (!ieee80211_sdata_running(sdata))
+                               continue;
+                       ieee80211_assign_chanctx(local, sdata);
+               }
 
-       sdata = rtnl_dereference(local->monitor_sdata);
-       if (sdata && ieee80211_sdata_running(sdata))
-               ieee80211_assign_chanctx(local, sdata);
+               sdata = rtnl_dereference(local->monitor_sdata);
+               if (sdata && ieee80211_sdata_running(sdata))
+                       ieee80211_assign_chanctx(local, sdata);
+       }
 
        /* add STAs back */
        mutex_lock(&local->sta_mtx);
@@ -1671,13 +1671,10 @@ int ieee80211_reconfig(struct ieee80211_local *local)
                        }
                        break;
                case NL80211_IFTYPE_WDS:
-                       break;
                case NL80211_IFTYPE_AP_VLAN:
                case NL80211_IFTYPE_MONITOR:
-                       /* ignore virtual */
-                       break;
                case NL80211_IFTYPE_P2P_DEVICE:
-                       changed = BSS_CHANGED_IDLE;
+                       /* nothing to do */
                        break;
                case NL80211_IFTYPE_UNSPECIFIED:
                case NUM_NL80211_IFTYPES:
@@ -1780,7 +1777,7 @@ int ieee80211_reconfig(struct ieee80211_local *local)
        mutex_unlock(&local->mtx);
 
        if (sched_scan_stopped)
-               cfg80211_sched_scan_stopped(local->hw.wiphy);
+               cfg80211_sched_scan_stopped_rtnl(local->hw.wiphy);
 
        /*
         * If this is for hw restart things are still running.
@@ -2797,3 +2794,121 @@ void ieee80211_recalc_dtim(struct ieee80211_local *local,
 
        ps->dtim_count = dtim_count;
 }
+
+int ieee80211_check_combinations(struct ieee80211_sub_if_data *sdata,
+                                const struct cfg80211_chan_def *chandef,
+                                enum ieee80211_chanctx_mode chanmode,
+                                u8 radar_detect)
+{
+       struct ieee80211_local *local = sdata->local;
+       struct ieee80211_sub_if_data *sdata_iter;
+       enum nl80211_iftype iftype = sdata->wdev.iftype;
+       int num[NUM_NL80211_IFTYPES];
+       struct ieee80211_chanctx *ctx;
+       int num_different_channels = 0;
+       int total = 1;
+
+       lockdep_assert_held(&local->chanctx_mtx);
+
+       if (WARN_ON(hweight32(radar_detect) > 1))
+               return -EINVAL;
+
+       if (WARN_ON(chandef && chanmode == IEEE80211_CHANCTX_SHARED &&
+                   !chandef->chan))
+               return -EINVAL;
+
+       if (chandef)
+               num_different_channels = 1;
+
+       if (WARN_ON(iftype >= NUM_NL80211_IFTYPES))
+               return -EINVAL;
+
+       /* Always allow software iftypes */
+       if (local->hw.wiphy->software_iftypes & BIT(iftype)) {
+               if (radar_detect)
+                       return -EINVAL;
+               return 0;
+       }
+
+       memset(num, 0, sizeof(num));
+
+       if (iftype != NL80211_IFTYPE_UNSPECIFIED)
+               num[iftype] = 1;
+
+       list_for_each_entry(ctx, &local->chanctx_list, list) {
+               if (ctx->conf.radar_enabled)
+                       radar_detect |= BIT(ctx->conf.def.width);
+               if (ctx->mode == IEEE80211_CHANCTX_EXCLUSIVE) {
+                       num_different_channels++;
+                       continue;
+               }
+               if (chandef && chanmode == IEEE80211_CHANCTX_SHARED &&
+                   cfg80211_chandef_compatible(chandef,
+                                               &ctx->conf.def))
+                       continue;
+               num_different_channels++;
+       }
+
+       list_for_each_entry_rcu(sdata_iter, &local->interfaces, list) {
+               struct wireless_dev *wdev_iter;
+
+               wdev_iter = &sdata_iter->wdev;
+
+               if (sdata_iter == sdata ||
+                   rcu_access_pointer(sdata_iter->vif.chanctx_conf) == NULL ||
+                   local->hw.wiphy->software_iftypes & BIT(wdev_iter->iftype))
+                       continue;
+
+               num[wdev_iter->iftype]++;
+               total++;
+       }
+
+       if (total == 1 && !radar_detect)
+               return 0;
+
+       return cfg80211_check_combinations(local->hw.wiphy,
+                                          num_different_channels,
+                                          radar_detect, num);
+}
+
+static void
+ieee80211_iter_max_chans(const struct ieee80211_iface_combination *c,
+                        void *data)
+{
+       u32 *max_num_different_channels = data;
+
+       *max_num_different_channels = max(*max_num_different_channels,
+                                         c->num_different_channels);
+}
+
+int ieee80211_max_num_channels(struct ieee80211_local *local)
+{
+       struct ieee80211_sub_if_data *sdata;
+       int num[NUM_NL80211_IFTYPES] = {};
+       struct ieee80211_chanctx *ctx;
+       int num_different_channels = 0;
+       u8 radar_detect = 0;
+       u32 max_num_different_channels = 1;
+       int err;
+
+       lockdep_assert_held(&local->chanctx_mtx);
+
+       list_for_each_entry(ctx, &local->chanctx_list, list) {
+               num_different_channels++;
+
+               if (ctx->conf.radar_enabled)
+                       radar_detect |= BIT(ctx->conf.def.width);
+       }
+
+       list_for_each_entry_rcu(sdata, &local->interfaces, list)
+               num[sdata->wdev.iftype]++;
+
+       err = cfg80211_iter_combinations(local->hw.wiphy,
+                                        num_different_channels, radar_detect,
+                                        num, ieee80211_iter_max_chans,
+                                        &max_num_different_channels);
+       if (err < 0)
+               return err;
+
+       return max_num_different_channels;
+}
index b8600e3c29c828d918b3676397f73a4d0fe7892c..9b3dcc201145dd3942bf30771e1638ea973759f7 100644 (file)
@@ -406,7 +406,10 @@ static int ccmp_encrypt_skb(struct ieee80211_tx_data *tx, struct sk_buff *skb)
 
        if (info->control.hw_key &&
            !(info->control.hw_key->flags & IEEE80211_KEY_FLAG_GENERATE_IV) &&
-           !(info->control.hw_key->flags & IEEE80211_KEY_FLAG_PUT_IV_SPACE)) {
+           !(info->control.hw_key->flags & IEEE80211_KEY_FLAG_PUT_IV_SPACE) &&
+           !((info->control.hw_key->flags &
+              IEEE80211_KEY_FLAG_GENERATE_IV_MGMT) &&
+             ieee80211_is_mgmt(hdr->frame_control))) {
                /*
                 * hwaccel has no need for preallocated room for CCMP
                 * header or MIC fields
index b33dd76d4307309bb02477606f8e7ccdefaa870d..1818a99b3081e5a87a5c1ed72e6dba1760f2c1bc 100644 (file)
@@ -2,6 +2,10 @@ config MAC802154
        tristate "Generic IEEE 802.15.4 Soft Networking Stack (mac802154)"
        depends on IEEE802154
        select CRC_CCITT
+       select CRYPTO_AUTHENC
+       select CRYPTO_CCM
+       select CRYPTO_CTR
+       select CRYPTO_AES
        ---help---
          This option enables the hardware independent IEEE 802.15.4
          networking stack for SoftMAC devices (the ones implementing
index 15d62df521825c8581fc25ec25a9caebc8902a72..9723d6f3f3e5b742e1d105b2667627949f1d7c7d 100644 (file)
@@ -1,4 +1,5 @@
 obj-$(CONFIG_MAC802154)        += mac802154.o
-mac802154-objs         := ieee802154_dev.o rx.o tx.o mac_cmd.o mib.o monitor.o wpan.o
+mac802154-objs         := ieee802154_dev.o rx.o tx.o mac_cmd.o mib.o \
+                          monitor.o wpan.o llsec.o
 
 ccflags-y += -D__CHECK_ENDIAN__
diff --git a/net/mac802154/llsec.c b/net/mac802154/llsec.c
new file mode 100644 (file)
index 0000000..e4a2558
--- /dev/null
@@ -0,0 +1,1069 @@
+/*
+ * Copyright (C) 2014 Fraunhofer ITWM
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * Written by:
+ * Phoebe Buckheister <phoebe.buckheister@itwm.fraunhofer.de>
+ */
+
+#include <linux/err.h>
+#include <linux/bug.h>
+#include <linux/completion.h>
+#include <net/ieee802154.h>
+#include <crypto/algapi.h>
+
+#include "mac802154.h"
+#include "llsec.h"
+
+static void llsec_key_put(struct mac802154_llsec_key *key);
+static bool llsec_key_id_equal(const struct ieee802154_llsec_key_id *a,
+                              const struct ieee802154_llsec_key_id *b);
+
+static void llsec_dev_free(struct mac802154_llsec_device *dev);
+
+void mac802154_llsec_init(struct mac802154_llsec *sec)
+{
+       memset(sec, 0, sizeof(*sec));
+
+       memset(&sec->params.default_key_source, 0xFF, IEEE802154_ADDR_LEN);
+
+       INIT_LIST_HEAD(&sec->table.security_levels);
+       INIT_LIST_HEAD(&sec->table.devices);
+       INIT_LIST_HEAD(&sec->table.keys);
+       hash_init(sec->devices_short);
+       hash_init(sec->devices_hw);
+       rwlock_init(&sec->lock);
+}
+
/* Tear down all security-level, device and key tables of @sec,
 * releasing the memory they own.  Assumes no concurrent users remain
 * (plain list_del, no RCU deferral).
 */
void mac802154_llsec_destroy(struct mac802154_llsec *sec)
{
	struct ieee802154_llsec_seclevel *sl, *sn;
	struct ieee802154_llsec_device *dev, *dn;
	struct ieee802154_llsec_key_entry *key, *kn;

	list_for_each_entry_safe(sl, sn, &sec->table.security_levels, list) {
		struct mac802154_llsec_seclevel *msl;

		/* the public entry is embedded in the private struct */
		msl = container_of(sl, struct mac802154_llsec_seclevel, level);
		list_del(&sl->list);
		kfree(msl);
	}

	list_for_each_entry_safe(dev, dn, &sec->table.devices, list) {
		struct mac802154_llsec_device *mdev;

		mdev = container_of(dev, struct mac802154_llsec_device, dev);
		list_del(&dev->list);
		llsec_dev_free(mdev);
	}

	list_for_each_entry_safe(key, kn, &sec->table.keys, list) {
		struct mac802154_llsec_key *mkey;

		mkey = container_of(key->key, struct mac802154_llsec_key, key);
		list_del(&key->list);
		/* drop the table's reference; the crypto transforms go
		 * away once the refcount reaches zero
		 */
		llsec_key_put(mkey);
		kfree(key);
	}
}
+
+
+
/* Copy the current security parameters of @sec into @params.
 * Always returns 0.
 */
int mac802154_llsec_get_params(struct mac802154_llsec *sec,
			       struct ieee802154_llsec_params *params)
{
	read_lock_bh(&sec->lock);
	*params = sec->params;
	read_unlock_bh(&sec->lock);

	return 0;
}
+
/* Update the subset of security parameters selected by the @changed
 * bitmask (IEEE802154_LLSEC_PARAM_* flags) from @params; fields whose
 * flag is not set are left untouched.  Always returns 0.
 */
int mac802154_llsec_set_params(struct mac802154_llsec *sec,
			       const struct ieee802154_llsec_params *params,
			       int changed)
{
	write_lock_bh(&sec->lock);

	if (changed & IEEE802154_LLSEC_PARAM_ENABLED)
		sec->params.enabled = params->enabled;
	if (changed & IEEE802154_LLSEC_PARAM_FRAME_COUNTER)
		sec->params.frame_counter = params->frame_counter;
	if (changed & IEEE802154_LLSEC_PARAM_OUT_LEVEL)
		sec->params.out_level = params->out_level;
	if (changed & IEEE802154_LLSEC_PARAM_OUT_KEY)
		sec->params.out_key = params->out_key;
	if (changed & IEEE802154_LLSEC_PARAM_KEY_SOURCE)
		sec->params.default_key_source = params->default_key_source;
	if (changed & IEEE802154_LLSEC_PARAM_PAN_ID)
		sec->params.pan_id = params->pan_id;
	if (changed & IEEE802154_LLSEC_PARAM_HWADDR)
		sec->params.hwaddr = params->hwaddr;
	if (changed & IEEE802154_LLSEC_PARAM_COORD_HWADDR)
		sec->params.coord_hwaddr = params->coord_hwaddr;
	if (changed & IEEE802154_LLSEC_PARAM_COORD_SHORTADDR)
		sec->params.coord_shortaddr = params->coord_shortaddr;

	write_unlock_bh(&sec->lock);

	return 0;
}
+
+
+
+static struct mac802154_llsec_key*
+llsec_key_alloc(const struct ieee802154_llsec_key *template)
+{
+       const int authsizes[3] = { 4, 8, 16 };
+       struct mac802154_llsec_key *key;
+       int i;
+
+       key = kzalloc(sizeof(*key), GFP_KERNEL);
+       if (!key)
+               return NULL;
+
+       kref_init(&key->ref);
+       key->key = *template;
+
+       BUILD_BUG_ON(ARRAY_SIZE(authsizes) != ARRAY_SIZE(key->tfm));
+
+       for (i = 0; i < ARRAY_SIZE(key->tfm); i++) {
+               key->tfm[i] = crypto_alloc_aead("ccm(aes)", 0,
+                                               CRYPTO_ALG_ASYNC);
+               if (!key->tfm[i])
+                       goto err_tfm;
+               if (crypto_aead_setkey(key->tfm[i], template->key,
+                                      IEEE802154_LLSEC_KEY_SIZE))
+                       goto err_tfm;
+               if (crypto_aead_setauthsize(key->tfm[i], authsizes[i]))
+                       goto err_tfm;
+       }
+
+       key->tfm0 = crypto_alloc_blkcipher("ctr(aes)", 0, CRYPTO_ALG_ASYNC);
+       if (!key->tfm0)
+               goto err_tfm;
+
+       if (crypto_blkcipher_setkey(key->tfm0, template->key,
+                                   IEEE802154_LLSEC_KEY_SIZE))
+               goto err_tfm0;
+
+       return key;
+
+err_tfm0:
+       crypto_free_blkcipher(key->tfm0);
+err_tfm:
+       for (i = 0; i < ARRAY_SIZE(key->tfm); i++)
+               if (key->tfm[i])
+                       crypto_free_aead(key->tfm[i]);
+
+       kfree(key);
+       return NULL;
+}
+
/* kref release callback: free all crypto transforms and the key object
 * itself once the last reference is dropped.
 */
static void llsec_key_release(struct kref *ref)
{
	struct mac802154_llsec_key *key;
	int i;

	key = container_of(ref, struct mac802154_llsec_key, ref);

	for (i = 0; i < ARRAY_SIZE(key->tfm); i++)
		crypto_free_aead(key->tfm[i]);

	crypto_free_blkcipher(key->tfm0);
	kfree(key);
}
+
/* Take an additional reference on @key; returns @key for convenience. */
static struct mac802154_llsec_key*
llsec_key_get(struct mac802154_llsec_key *key)
{
	kref_get(&key->ref);
	return key;
}
+
/* Drop a reference on @key; frees it via llsec_key_release() at zero. */
static void llsec_key_put(struct mac802154_llsec_key *key)
{
	kref_put(&key->ref, llsec_key_release);
}
+
+static bool llsec_key_id_equal(const struct ieee802154_llsec_key_id *a,
+                              const struct ieee802154_llsec_key_id *b)
+{
+       if (a->mode != b->mode)
+               return false;
+
+       if (a->mode == IEEE802154_SCF_KEY_IMPLICIT)
+               return ieee802154_addr_equal(&a->device_addr, &b->device_addr);
+
+       if (a->id != b->id)
+               return false;
+
+       switch (a->mode) {
+       case IEEE802154_SCF_KEY_INDEX:
+               return true;
+       case IEEE802154_SCF_KEY_SHORT_INDEX:
+               return a->short_source == b->short_source;
+       case IEEE802154_SCF_KEY_HW_INDEX:
+               return a->extended_source == b->extended_source;
+       }
+
+       return false;
+}
+
/* Install key @key under id @id in the PIB key table.
 *
 * Returns -EINVAL if command frame ids are given for a key not usable
 * on MAC command frames, -EEXIST if the id is already taken or the same
 * AES key exists with different frame types/command frame ids, and
 * -ENOMEM on allocation failure.  When the identical AES key is already
 * installed, its crypto transforms are shared by taking a reference
 * instead of allocating new ones.
 */
int mac802154_llsec_key_add(struct mac802154_llsec *sec,
			    const struct ieee802154_llsec_key_id *id,
			    const struct ieee802154_llsec_key *key)
{
	struct mac802154_llsec_key *mkey = NULL;
	struct ieee802154_llsec_key_entry *pos, *new;

	/* command frame ids only make sense for MAC command frames */
	if (!(key->frame_types & (1 << IEEE802154_FC_TYPE_MAC_CMD)) &&
	    key->cmd_frame_ids)
		return -EINVAL;

	list_for_each_entry(pos, &sec->table.keys, list) {
		if (llsec_key_id_equal(&pos->id, id))
			return -EEXIST;

		if (memcmp(pos->key->key, key->key,
			   IEEE802154_LLSEC_KEY_SIZE))
			continue;

		/* same AES key material already installed - reuse it */
		mkey = container_of(pos->key, struct mac802154_llsec_key, key);

		/* Don't allow multiple instances of the same AES key to have
		 * different allowed frame types/command frame ids, as this is
		 * not possible in the 802.15.4 PIB.
		 */
		if (pos->key->frame_types != key->frame_types ||
		    pos->key->cmd_frame_ids != key->cmd_frame_ids)
			return -EEXIST;

		break;
	}

	new = kzalloc(sizeof(*new), GFP_KERNEL);
	if (!new)
		return -ENOMEM;

	if (!mkey)
		mkey = llsec_key_alloc(key);
	else
		mkey = llsec_key_get(mkey);

	if (!mkey)
		goto fail;

	new->id = *id;
	new->key = &mkey->key;

	list_add_rcu(&new->list, &sec->table.keys);

	return 0;

fail:
	kfree(new);
	return -ENOMEM;
}
+
+int mac802154_llsec_key_del(struct mac802154_llsec *sec,
+                           const struct ieee802154_llsec_key_id *key)
+{
+       struct ieee802154_llsec_key_entry *pos;
+
+       list_for_each_entry(pos, &sec->table.keys, list) {
+               struct mac802154_llsec_key *mkey;
+
+               mkey = container_of(pos->key, struct mac802154_llsec_key, key);
+
+               if (llsec_key_id_equal(&pos->id, key)) {
+                       llsec_key_put(mkey);
+                       return 0;
+               }
+       }
+
+       return -ENOENT;
+}
+
+
+
+static bool llsec_dev_use_shortaddr(__le16 short_addr)
+{
+       return short_addr != cpu_to_le16(IEEE802154_ADDR_UNDEF) &&
+               short_addr != cpu_to_le16(0xffff);
+}
+
/* Hash key for the short-address device table: short address in the
 * upper 16 bits, PAN id in the lower 16.
 */
static u32 llsec_dev_hash_short(__le16 short_addr, __le16 pan_id)
{
	return ((__force u16) short_addr) << 16 | (__force u16) pan_id;
}
+
/* Hash key for the extended-address device table: the raw 64-bit
 * address value.
 */
static u64 llsec_dev_hash_long(__le64 hwaddr)
{
	return (__force u64) hwaddr;
}
+
+static struct mac802154_llsec_device*
+llsec_dev_find_short(struct mac802154_llsec *sec, __le16 short_addr,
+                    __le16 pan_id)
+{
+       struct mac802154_llsec_device *dev;
+       u32 key = llsec_dev_hash_short(short_addr, pan_id);
+
+       hash_for_each_possible_rcu(sec->devices_short, dev, bucket_s, key) {
+               if (dev->dev.short_addr == short_addr &&
+                   dev->dev.pan_id == pan_id)
+                       return dev;
+       }
+
+       return NULL;
+}
+
+static struct mac802154_llsec_device*
+llsec_dev_find_long(struct mac802154_llsec *sec, __le64 hwaddr)
+{
+       struct mac802154_llsec_device *dev;
+       u64 key = llsec_dev_hash_long(hwaddr);
+
+       hash_for_each_possible_rcu(sec->devices_hw, dev, bucket_hw, key) {
+               if (dev->dev.hwaddr == hwaddr)
+                       return dev;
+       }
+
+       return NULL;
+}
+
+static void llsec_dev_free(struct mac802154_llsec_device *dev)
+{
+       struct ieee802154_llsec_device_key *pos, *pn;
+       struct mac802154_llsec_device_key *devkey;
+
+       list_for_each_entry_safe(pos, pn, &dev->dev.keys, list) {
+               devkey = container_of(pos, struct mac802154_llsec_device_key,
+                                     devkey);
+
+               list_del(&pos->list);
+               kfree(devkey);
+       }
+
+       kfree(dev);
+}
+
/* Add @dev to the device table.
 *
 * The device is always hashed by its extended address and additionally
 * by (short address, PAN id) when the short address is usable.  Returns
 * -EEXIST if either address is already present, -ENOMEM on allocation
 * failure.
 */
int mac802154_llsec_dev_add(struct mac802154_llsec *sec,
			    const struct ieee802154_llsec_device *dev)
{
	struct mac802154_llsec_device *entry;
	u32 skey = llsec_dev_hash_short(dev->short_addr, dev->pan_id);
	u64 hwkey = llsec_dev_hash_long(dev->hwaddr);

	/* the long hash is the raw address, so sizes must agree */
	BUILD_BUG_ON(sizeof(hwkey) != IEEE802154_ADDR_LEN);

	if ((llsec_dev_use_shortaddr(dev->short_addr) &&
	     llsec_dev_find_short(sec, dev->short_addr, dev->pan_id)) ||
	     llsec_dev_find_long(sec, dev->hwaddr))
		return -EEXIST;

	entry = kmalloc(sizeof(*entry), GFP_KERNEL);
	if (!entry)
		return -ENOMEM;

	entry->dev = *dev;
	spin_lock_init(&entry->lock);
	/* re-init the list head copied from @dev; keys start empty */
	INIT_LIST_HEAD(&entry->dev.keys);

	if (llsec_dev_use_shortaddr(dev->short_addr))
		hash_add_rcu(sec->devices_short, &entry->bucket_s, skey);
	else
		/* keep the node valid so hash_del_rcu() is safe later */
		INIT_HLIST_NODE(&entry->bucket_s);

	hash_add_rcu(sec->devices_hw, &entry->bucket_hw, hwkey);
	list_add_tail_rcu(&entry->dev.list, &sec->table.devices);

	return 0;
}
+
/* RCU callback wrapper around llsec_dev_free(). */
static void llsec_dev_free_rcu(struct rcu_head *rcu)
{
	llsec_dev_free(container_of(rcu, struct mac802154_llsec_device, rcu));
}
+
+int mac802154_llsec_dev_del(struct mac802154_llsec *sec, __le64 device_addr)
+{
+       struct mac802154_llsec_device *pos;
+
+       pos = llsec_dev_find_long(sec, device_addr);
+       if (!pos)
+               return -ENOENT;
+
+       hash_del_rcu(&pos->bucket_s);
+       hash_del_rcu(&pos->bucket_hw);
+       call_rcu(&pos->rcu, llsec_dev_free_rcu);
+
+       return 0;
+}
+
+
+
+static struct mac802154_llsec_device_key*
+llsec_devkey_find(struct mac802154_llsec_device *dev,
+                 const struct ieee802154_llsec_key_id *key)
+{
+       struct ieee802154_llsec_device_key *devkey;
+
+       list_for_each_entry_rcu(devkey, &dev->dev.keys, list) {
+               if (!llsec_key_id_equal(key, &devkey->key_id))
+                       continue;
+
+               return container_of(devkey, struct mac802154_llsec_device_key,
+                                   devkey);
+       }
+
+       return NULL;
+}
+
/* Attach key usage data @key to the device with extended address
 * @dev_addr.  Returns -ENOENT if the device is unknown, -EEXIST if it
 * already has an entry for this key id, -ENOMEM on allocation failure.
 */
int mac802154_llsec_devkey_add(struct mac802154_llsec *sec,
			       __le64 dev_addr,
			       const struct ieee802154_llsec_device_key *key)
{
	struct mac802154_llsec_device *dev;
	struct mac802154_llsec_device_key *devkey;

	dev = llsec_dev_find_long(sec, dev_addr);

	if (!dev)
		return -ENOENT;

	if (llsec_devkey_find(dev, &key->key_id))
		return -EEXIST;

	devkey = kmalloc(sizeof(*devkey), GFP_KERNEL);
	if (!devkey)
		return -ENOMEM;

	devkey->devkey = *key;
	list_add_tail_rcu(&devkey->devkey.list, &dev->dev.keys);
	return 0;
}
+
/* Remove the key usage data identified by @key from the device with
 * extended address @dev_addr; the entry is freed after an RCU grace
 * period.  Returns -ENOENT if either the device or the key is unknown.
 */
int mac802154_llsec_devkey_del(struct mac802154_llsec *sec,
			       __le64 dev_addr,
			       const struct ieee802154_llsec_device_key *key)
{
	struct mac802154_llsec_device *dev;
	struct mac802154_llsec_device_key *devkey;

	dev = llsec_dev_find_long(sec, dev_addr);

	if (!dev)
		return -ENOENT;

	devkey = llsec_devkey_find(dev, &key->key_id);
	if (!devkey)
		return -ENOENT;

	list_del_rcu(&devkey->devkey.list);
	kfree_rcu(devkey, rcu);
	return 0;
}
+
+
+
/* Find a security-level entry exactly matching @sl, or NULL.  The
 * cmd_frame_id only takes part in the comparison for MAC command
 * frames.
 */
static struct mac802154_llsec_seclevel*
llsec_find_seclevel(const struct mac802154_llsec *sec,
		    const struct ieee802154_llsec_seclevel *sl)
{
	struct ieee802154_llsec_seclevel *pos;

	list_for_each_entry(pos, &sec->table.security_levels, list) {
		if (pos->frame_type != sl->frame_type ||
		    (pos->frame_type == IEEE802154_FC_TYPE_MAC_CMD &&
		     pos->cmd_frame_id != sl->cmd_frame_id) ||
		    pos->device_override != sl->device_override ||
		    pos->sec_levels != sl->sec_levels)
			continue;

		return container_of(pos, struct mac802154_llsec_seclevel,
				    level);
	}

	return NULL;
}
+
/* Add security level @sl to the table.  Returns -EEXIST if an
 * identical entry already exists, -ENOMEM on allocation failure.
 */
int mac802154_llsec_seclevel_add(struct mac802154_llsec *sec,
				 const struct ieee802154_llsec_seclevel *sl)
{
	struct mac802154_llsec_seclevel *entry;

	if (llsec_find_seclevel(sec, sl))
		return -EEXIST;

	entry = kmalloc(sizeof(*entry), GFP_KERNEL);
	if (!entry)
		return -ENOMEM;

	entry->level = *sl;

	list_add_tail_rcu(&entry->level.list, &sec->table.security_levels);

	return 0;
}
+
+/* Remove the security-level descriptor matching @sl; the entry is
+ * freed after an RCU grace period.  Returns -ENOENT when not found.
+ */
+int mac802154_llsec_seclevel_del(struct mac802154_llsec *sec,
+                                const struct ieee802154_llsec_seclevel *sl)
+{
+       struct mac802154_llsec_seclevel *pos;
+
+       pos = llsec_find_seclevel(sec, sl);
+       if (!pos)
+               return -ENOENT;
+
+       list_del_rcu(&pos->level.list);
+       kfree_rcu(pos, rcu);
+
+       return 0;
+}
+
+
+
+/* Reconstruct the coordinator's address for frames that carry no
+ * source address: use the short coordinator address when one is set,
+ * fall back to the coordinator's extended address when the short
+ * address is undefined, and fail on the broadcast short address.
+ */
+static int llsec_recover_addr(struct mac802154_llsec *sec,
+                             struct ieee802154_addr *addr)
+{
+       __le16 caddr = sec->params.coord_shortaddr;
+       addr->pan_id = sec->params.pan_id;
+
+       if (caddr == cpu_to_le16(IEEE802154_ADDR_BROADCAST)) {
+               return -EINVAL;
+       } else if (caddr == cpu_to_le16(IEEE802154_ADDR_UNDEF)) {
+               addr->extended_addr = sec->params.coord_hwaddr;
+               addr->mode = IEEE802154_ADDR_LONG;
+       } else {
+               addr->short_addr = sec->params.coord_shortaddr;
+               addr->mode = IEEE802154_ADDR_SHORT;
+       }
+
+       return 0;
+}
+
+/* Find the key to use for @hdr according to the key id mode in its
+ * security header.  For implicit keys the device address selects the
+ * key (the coordinator's address is substituted for beacons and for
+ * addressless frames).  On success a reference is taken on the
+ * returned key and, if @key_id is non-NULL, the matching key id is
+ * copied out.  Must run inside an RCU read-side critical section.
+ */
+static struct mac802154_llsec_key*
+llsec_lookup_key(struct mac802154_llsec *sec,
+                const struct ieee802154_hdr *hdr,
+                const struct ieee802154_addr *addr,
+                struct ieee802154_llsec_key_id *key_id)
+{
+       struct ieee802154_addr devaddr = *addr;
+       u8 key_id_mode = hdr->sec.key_id_mode;
+       struct ieee802154_llsec_key_entry *key_entry;
+       struct mac802154_llsec_key *key;
+
+       if (key_id_mode == IEEE802154_SCF_KEY_IMPLICIT &&
+           devaddr.mode == IEEE802154_ADDR_NONE) {
+               if (hdr->fc.type == IEEE802154_FC_TYPE_BEACON) {
+                       devaddr.extended_addr = sec->params.coord_hwaddr;
+                       devaddr.mode = IEEE802154_ADDR_LONG;
+               } else if (llsec_recover_addr(sec, &devaddr) < 0) {
+                       return NULL;
+               }
+       }
+
+       list_for_each_entry_rcu(key_entry, &sec->table.keys, list) {
+               const struct ieee802154_llsec_key_id *id = &key_entry->id;
+
+               /* key must be allowed for this frame type at all */
+               if (!(key_entry->key->frame_types & BIT(hdr->fc.type)))
+                       continue;
+
+               if (id->mode != key_id_mode)
+                       continue;
+
+               if (key_id_mode == IEEE802154_SCF_KEY_IMPLICIT) {
+                       if (ieee802154_addr_equal(&devaddr, &id->device_addr))
+                               goto found;
+               } else {
+                       if (id->id != hdr->sec.key_id)
+                               continue;
+
+                       /* explicit modes additionally match the key source */
+                       if ((key_id_mode == IEEE802154_SCF_KEY_INDEX) ||
+                           (key_id_mode == IEEE802154_SCF_KEY_SHORT_INDEX &&
+                            id->short_source == hdr->sec.short_src) ||
+                           (key_id_mode == IEEE802154_SCF_KEY_HW_INDEX &&
+                            id->extended_source == hdr->sec.extended_src))
+                               goto found;
+               }
+       }
+
+       return NULL;
+
+found:
+       key = container_of(key_entry->key, struct mac802154_llsec_key, key);
+       if (key_id)
+               *key_id = key_entry->id;
+       return llsec_key_get(key);
+}
+
+
+/* Build the 16 byte nonce/IV block: a flags byte (L' = 1), the 8 byte
+ * device address and the 4 byte frame counter (both byte-swapped to
+ * big-endian wire order), the security level, and a two-byte block
+ * counter initialized to 1.
+ */
+static void llsec_geniv(u8 iv[16], __le64 addr,
+                       const struct ieee802154_sechdr *sec)
+{
+       __be64 addr_bytes = (__force __be64) swab64((__force u64) addr);
+       __be32 frame_counter = (__force __be32) swab32((__force u32) sec->frame_counter);
+
+       iv[0] = 1; /* L' = L - 1 = 1 */
+       memcpy(iv + 1, &addr_bytes, sizeof(addr_bytes));
+       memcpy(iv + 9, &frame_counter, sizeof(frame_counter));
+       iv[13] = sec->level;
+       iv[14] = 0;
+       iv[15] = 1;
+}
+
+/* Encryption-only security level: transform the whole frame payload in
+ * place with the unauthenticated cipher (key->tfm0); no tag is
+ * appended.
+ */
+static int
+llsec_do_encrypt_unauth(struct sk_buff *skb, const struct mac802154_llsec *sec,
+                       const struct ieee802154_hdr *hdr,
+                       struct mac802154_llsec_key *key)
+{
+       u8 iv[16];
+       struct scatterlist src;
+       struct blkcipher_desc req = {
+               .tfm = key->tfm0,
+               .info = iv,
+               .flags = 0,
+       };
+
+       llsec_geniv(iv, sec->params.hwaddr, &hdr->sec);
+       sg_init_one(&src, skb->data, skb->len);
+       return crypto_blkcipher_encrypt_iv(&req, &src, &src, skb->len);
+}
+
+/* Pick the AEAD transform configured for the requested tag length.
+ * Keys carry one tfm per supported authsize, so not finding one is a
+ * programming error (hence BUG()).
+ */
+static struct crypto_aead*
+llsec_tfm_by_len(struct mac802154_llsec_key *key, int authlen)
+{
+       int i;
+
+       for (i = 0; i < ARRAY_SIZE(key->tfm); i++)
+               if (crypto_aead_authsize(key->tfm[i]) == authlen)
+                       return key->tfm[i];
+
+       BUG();
+}
+
+/* Authenticated (and optionally encrypted) security levels.  The MAC
+ * header is always associated data; for authentication-only levels the
+ * payload is folded into the associated data as well, so the cipher
+ * only produces the tag.  The tag is appended to the skb.
+ */
+static int
+llsec_do_encrypt_auth(struct sk_buff *skb, const struct mac802154_llsec *sec,
+                     const struct ieee802154_hdr *hdr,
+                     struct mac802154_llsec_key *key)
+{
+       u8 iv[16];
+       unsigned char *data;
+       int authlen, assoclen, datalen, rc;
+       struct scatterlist src, assoc[2], dst[2];
+       struct aead_request *req;
+
+       authlen = ieee802154_sechdr_authtag_len(&hdr->sec);
+       llsec_geniv(iv, sec->params.hwaddr, &hdr->sec);
+
+       req = aead_request_alloc(llsec_tfm_by_len(key, authlen), GFP_ATOMIC);
+       if (!req)
+               return -ENOMEM;
+
+       sg_init_table(assoc, 2);
+       sg_set_buf(&assoc[0], skb_mac_header(skb), skb->mac_len);
+       assoclen = skb->mac_len;
+
+       data = skb_mac_header(skb) + skb->mac_len;
+       datalen = skb_tail_pointer(skb) - data;
+
+       if (hdr->sec.level & IEEE802154_SCF_SECLEVEL_ENC) {
+               /* encrypting level: payload is crypted, not associated */
+               sg_set_buf(&assoc[1], data, 0);
+       } else {
+               /* auth-only level: payload is associated data only */
+               sg_set_buf(&assoc[1], data, datalen);
+               assoclen += datalen;
+               datalen = 0;
+       }
+
+       sg_init_one(&src, data, datalen);
+
+       sg_init_table(dst, 2);
+       sg_set_buf(&dst[0], data, datalen);
+       sg_set_buf(&dst[1], skb_put(skb, authlen), authlen);
+
+       aead_request_set_callback(req, 0, NULL, NULL);
+       aead_request_set_assoc(req, assoc, assoclen);
+       aead_request_set_crypt(req, &src, dst, datalen, iv);
+
+       rc = crypto_aead_encrypt(req);
+
+       kfree(req);
+
+       return rc;
+}
+
+/* Dispatch on the security level: pure encryption (no tag) vs. the
+ * AEAD path for all authenticated levels.
+ */
+static int llsec_do_encrypt(struct sk_buff *skb,
+                           const struct mac802154_llsec *sec,
+                           const struct ieee802154_hdr *hdr,
+                           struct mac802154_llsec_key *key)
+{
+       if (hdr->sec.level == IEEE802154_SCF_SECLEVEL_ENC)
+               return llsec_do_encrypt_unauth(skb, sec, hdr, key);
+       else
+               return llsec_do_encrypt_auth(skb, sec, hdr, key);
+}
+
+/* Encrypt an outgoing data frame in place.  Pulls the MAC header,
+ * looks up the key under the read lock, then re-takes the lock for
+ * writing to atomically assign and advance the outgoing frame counter
+ * before running the cipher.  Unsecured frames pass through untouched.
+ */
+int mac802154_llsec_encrypt(struct mac802154_llsec *sec, struct sk_buff *skb)
+{
+       struct ieee802154_hdr hdr;
+       int rc, authlen, hlen;
+       struct mac802154_llsec_key *key;
+       u32 frame_ctr;
+
+       hlen = ieee802154_hdr_pull(skb, &hdr);
+
+       if (hlen < 0 || hdr.fc.type != IEEE802154_FC_TYPE_DATA)
+               return -EINVAL;
+
+       if (!hdr.fc.security_enabled || hdr.sec.level == 0) {
+               skb_push(skb, hlen);
+               return 0;
+       }
+
+       authlen = ieee802154_sechdr_authtag_len(&hdr.sec);
+
+       /* NOTE(review): on this and the error paths below the pulled
+        * header is not pushed back; callers presumably drop the skb on
+        * error — confirm.
+        */
+       if (skb->len + hlen + authlen + IEEE802154_MFR_SIZE > IEEE802154_MTU)
+               return -EMSGSIZE;
+
+       rcu_read_lock();
+
+       read_lock_bh(&sec->lock);
+
+       if (!sec->params.enabled) {
+               rc = -EINVAL;
+               goto fail_read;
+       }
+
+       key = llsec_lookup_key(sec, &hdr, &hdr.dest, NULL);
+       if (!key) {
+               rc = -ENOKEY;
+               goto fail_read;
+       }
+
+       read_unlock_bh(&sec->lock);
+
+       /* re-take the lock for writing to claim the next frame counter */
+       write_lock_bh(&sec->lock);
+
+       frame_ctr = be32_to_cpu(sec->params.frame_counter);
+       hdr.sec.frame_counter = cpu_to_le32(frame_ctr);
+       /* refuse the final counter value instead of wrapping to zero */
+       if (frame_ctr == 0xFFFFFFFF) {
+               write_unlock_bh(&sec->lock);
+               llsec_key_put(key);
+               rc = -EOVERFLOW;
+               goto fail;
+       }
+
+       sec->params.frame_counter = cpu_to_be32(frame_ctr + 1);
+
+       write_unlock_bh(&sec->lock);
+
+       rcu_read_unlock();
+
+       skb->mac_len = ieee802154_hdr_push(skb, &hdr);
+       skb_reset_mac_header(skb);
+
+       rc = llsec_do_encrypt(skb, sec, &hdr, key);
+       llsec_key_put(key);
+
+       return rc;
+
+fail_read:
+       read_unlock_bh(&sec->lock);
+fail:
+       rcu_read_unlock();
+       return rc;
+}
+
+
+
+/* Look up the device-table entry for @addr, recovering the coordinator
+ * address for addressless frames.  Short addresses match on
+ * (pan_id, short_addr), extended addresses on hwaddr.  Must run inside
+ * an RCU read-side critical section.
+ */
+static struct mac802154_llsec_device*
+llsec_lookup_dev(struct mac802154_llsec *sec,
+                const struct ieee802154_addr *addr)
+{
+       struct ieee802154_addr devaddr = *addr;
+       struct mac802154_llsec_device *dev = NULL;
+
+       if (devaddr.mode == IEEE802154_ADDR_NONE &&
+           llsec_recover_addr(sec, &devaddr) < 0)
+               return NULL;
+
+       if (devaddr.mode == IEEE802154_ADDR_SHORT) {
+               u32 key = llsec_dev_hash_short(devaddr.short_addr,
+                                              devaddr.pan_id);
+
+               hash_for_each_possible_rcu(sec->devices_short, dev,
+                                          bucket_s, key) {
+                       if (dev->dev.pan_id == devaddr.pan_id &&
+                           dev->dev.short_addr == devaddr.short_addr)
+                               return dev;
+               }
+       } else {
+               u64 key = llsec_dev_hash_long(devaddr.extended_addr);
+
+               hash_for_each_possible_rcu(sec->devices_hw, dev,
+                                          bucket_hw, key) {
+                       if (dev->dev.hwaddr == devaddr.extended_addr)
+                               return dev;
+               }
+       }
+
+       return NULL;
+}
+
+/* Copy the security-level descriptor for @frame_type (and, for MAC
+ * command frames, @cmd_frame_id) into *@rlevel.  Returns -EINVAL when
+ * no descriptor exists for that frame type.
+ */
+static int
+llsec_lookup_seclevel(const struct mac802154_llsec *sec,
+                     u8 frame_type, u8 cmd_frame_id,
+                     struct ieee802154_llsec_seclevel *rlevel)
+{
+       struct ieee802154_llsec_seclevel *level;
+
+       list_for_each_entry_rcu(level, &sec->table.security_levels, list) {
+               if (level->frame_type == frame_type &&
+                   (frame_type != IEEE802154_FC_TYPE_MAC_CMD ||
+                    level->cmd_frame_id == cmd_frame_id)) {
+                       *rlevel = *level;
+                       return 0;
+               }
+       }
+
+       return -EINVAL;
+}
+
+/* Encryption-only security level on receive: decrypt the payload after
+ * the MAC header in place with the unauthenticated cipher (key->tfm0);
+ * there is no tag to verify.
+ */
+static int
+llsec_do_decrypt_unauth(struct sk_buff *skb, const struct mac802154_llsec *sec,
+                       const struct ieee802154_hdr *hdr,
+                       struct mac802154_llsec_key *key, __le64 dev_addr)
+{
+       u8 iv[16];
+       unsigned char *data;
+       int datalen;
+       struct scatterlist src;
+       struct blkcipher_desc req = {
+               .tfm = key->tfm0,
+               .info = iv,
+               .flags = 0,
+       };
+
+       /* nonce is built from the *sender's* address on receive */
+       llsec_geniv(iv, dev_addr, &hdr->sec);
+       data = skb_mac_header(skb) + skb->mac_len;
+       datalen = skb_tail_pointer(skb) - data;
+
+       sg_init_one(&src, data, datalen);
+
+       return crypto_blkcipher_decrypt_iv(&req, &src, &src, datalen);
+}
+
+/* Verify (and, for encrypting levels, decrypt) an authenticated frame.
+ * Mirrors llsec_do_encrypt_auth: for auth-only levels everything but
+ * the tag becomes associated data and only the tag is passed as crypto
+ * payload.  The tag is trimmed from the skb regardless of whether
+ * verification succeeded; the caller checks the return code.
+ */
+static int
+llsec_do_decrypt_auth(struct sk_buff *skb, const struct mac802154_llsec *sec,
+                     const struct ieee802154_hdr *hdr,
+                     struct mac802154_llsec_key *key, __le64 dev_addr)
+{
+       u8 iv[16];
+       unsigned char *data;
+       int authlen, datalen, assoclen, rc;
+       struct scatterlist src, assoc[2];
+       struct aead_request *req;
+
+       authlen = ieee802154_sechdr_authtag_len(&hdr->sec);
+       llsec_geniv(iv, dev_addr, &hdr->sec);
+
+       req = aead_request_alloc(llsec_tfm_by_len(key, authlen), GFP_ATOMIC);
+       if (!req)
+               return -ENOMEM;
+
+       sg_init_table(assoc, 2);
+       sg_set_buf(&assoc[0], skb_mac_header(skb), skb->mac_len);
+       assoclen = skb->mac_len;
+
+       data = skb_mac_header(skb) + skb->mac_len;
+       datalen = skb_tail_pointer(skb) - data;
+
+       if (hdr->sec.level & IEEE802154_SCF_SECLEVEL_ENC) {
+               /* encrypting level: ciphertext + tag are crypto payload */
+               sg_set_buf(&assoc[1], data, 0);
+       } else {
+               /* auth-only level: payload is associated, tag is payload */
+               sg_set_buf(&assoc[1], data, datalen - authlen);
+               assoclen += datalen - authlen;
+               data += datalen - authlen;
+               datalen = authlen;
+       }
+
+       sg_init_one(&src, data, datalen);
+
+       aead_request_set_callback(req, 0, NULL, NULL);
+       aead_request_set_assoc(req, assoc, assoclen);
+       aead_request_set_crypt(req, &src, &src, datalen, iv);
+
+       rc = crypto_aead_decrypt(req);
+
+       kfree(req);
+       skb_trim(skb, skb->len - authlen);
+
+       return rc;
+}
+
+/* Dispatch on the security level: pure decryption (no tag) vs. the
+ * AEAD verification path for all authenticated levels.
+ */
+static int
+llsec_do_decrypt(struct sk_buff *skb, const struct mac802154_llsec *sec,
+                const struct ieee802154_hdr *hdr,
+                struct mac802154_llsec_key *key, __le64 dev_addr)
+{
+       if (hdr->sec.level == IEEE802154_SCF_SECLEVEL_ENC)
+               return llsec_do_decrypt_unauth(skb, sec, hdr, key, dev_addr);
+       else
+               return llsec_do_decrypt_auth(skb, sec, hdr, key, dev_addr);
+}
+
+/* In DEVKEY_RECORD mode, make sure a per-device entry exists for
+ * @in_key.  The entry is allocated outside dev->lock and the lookup is
+ * repeated under the lock so a concurrent insert of the same key loses
+ * gracefully (the loser's allocation is freed).
+ */
+static int
+llsec_update_devkey_record(struct mac802154_llsec_device *dev,
+                          const struct ieee802154_llsec_key_id *in_key)
+{
+       struct mac802154_llsec_device_key *devkey;
+
+       devkey = llsec_devkey_find(dev, in_key);
+
+       if (!devkey) {
+               struct mac802154_llsec_device_key *next;
+
+               next = kzalloc(sizeof(*devkey), GFP_ATOMIC);
+               if (!next)
+                       return -ENOMEM;
+
+               next->devkey.key_id = *in_key;
+
+               spin_lock_bh(&dev->lock);
+
+               /* re-check under the lock before publishing */
+               devkey = llsec_devkey_find(dev, in_key);
+               if (!devkey)
+                       list_add_rcu(&next->devkey.list, &dev->dev.keys);
+               else
+                       kfree(next);
+
+               spin_unlock_bh(&dev->lock);
+       }
+
+       return 0;
+}
+
+/* Replay protection: depending on the device's key mode, check
+ * @frame_counter against the per-device-key counter (RESTRICT),
+ * the per-device counter, or record the key first (RECORD).  Rejects
+ * stale counters with -EINVAL and advances the stored counter past
+ * @frame_counter on success.
+ */
+static int
+llsec_update_devkey_info(struct mac802154_llsec_device *dev,
+                        const struct ieee802154_llsec_key_id *in_key,
+                        u32 frame_counter)
+{
+       struct mac802154_llsec_device_key *devkey = NULL;
+
+       if (dev->dev.key_mode == IEEE802154_LLSEC_DEVKEY_RESTRICT) {
+               devkey = llsec_devkey_find(dev, in_key);
+               if (!devkey)
+                       return -ENOENT;
+       }
+
+       if (dev->dev.key_mode == IEEE802154_LLSEC_DEVKEY_RECORD) {
+               int rc = llsec_update_devkey_record(dev, in_key);
+
+               if (rc < 0)
+                       return rc;
+       }
+
+       spin_lock_bh(&dev->lock);
+
+       if ((!devkey && frame_counter < dev->dev.frame_counter) ||
+           (devkey && frame_counter < devkey->devkey.frame_counter)) {
+               spin_unlock_bh(&dev->lock);
+               return -EINVAL;
+       }
+
+       if (devkey)
+               devkey->devkey.frame_counter = frame_counter + 1;
+       else
+               dev->dev.frame_counter = frame_counter + 1;
+
+       spin_unlock_bh(&dev->lock);
+
+       return 0;
+}
+
+/* Authenticate and decrypt an incoming frame in place.  Secured frames
+ * require an 802.15.4-2006 header (fc.version != 0), a known key and
+ * originating device, an acceptable security level, and a frame
+ * counter that passes replay protection.  Unsecured frames pass
+ * through untouched.
+ */
+int mac802154_llsec_decrypt(struct mac802154_llsec *sec, struct sk_buff *skb)
+{
+       struct ieee802154_hdr hdr;
+       struct mac802154_llsec_key *key;
+       struct ieee802154_llsec_key_id key_id;
+       struct mac802154_llsec_device *dev;
+       struct ieee802154_llsec_seclevel seclevel;
+       int err;
+       __le64 dev_addr;
+       u32 frame_ctr;
+
+       if (ieee802154_hdr_peek(skb, &hdr) < 0)
+               return -EINVAL;
+       if (!hdr.fc.security_enabled)
+               return 0;
+       if (hdr.fc.version == 0)
+               return -EINVAL;
+
+       read_lock_bh(&sec->lock);
+       if (!sec->params.enabled) {
+               read_unlock_bh(&sec->lock);
+               return -EINVAL;
+       }
+       read_unlock_bh(&sec->lock);
+
+       rcu_read_lock();
+
+       key = llsec_lookup_key(sec, &hdr, &hdr.source, &key_id);
+       if (!key) {
+               err = -ENOKEY;
+               goto fail;
+       }
+
+       dev = llsec_lookup_dev(sec, &hdr.source);
+       if (!dev) {
+               err = -EINVAL;
+               goto fail_dev;
+       }
+
+       if (llsec_lookup_seclevel(sec, hdr.fc.type, 0, &seclevel) < 0) {
+               err = -EINVAL;
+               goto fail_dev;
+       }
+
+       /* NOTE(review): this rejects only when the level is disallowed
+        * AND the unsecured-override case applies; verify that && (and
+        * not ||) reflects the intended acceptance policy.
+        */
+       if (!(seclevel.sec_levels & BIT(hdr.sec.level)) &&
+           (hdr.sec.level == 0 && seclevel.device_override &&
+            !dev->dev.seclevel_exempt)) {
+               err = -EINVAL;
+               goto fail_dev;
+       }
+
+       frame_ctr = le32_to_cpu(hdr.sec.frame_counter);
+
+       /* the final counter value is reserved/exhausted on the sender */
+       if (frame_ctr == 0xffffffff) {
+               err = -EOVERFLOW;
+               goto fail_dev;
+       }
+
+       err = llsec_update_devkey_info(dev, &key_id, frame_ctr);
+       if (err)
+               goto fail_dev;
+
+       /* copy out what we need before leaving the RCU section */
+       dev_addr = dev->dev.hwaddr;
+
+       rcu_read_unlock();
+
+       err = llsec_do_decrypt(skb, sec, &hdr, key, dev_addr);
+       llsec_key_put(key);
+       return err;
+
+fail_dev:
+       llsec_key_put(key);
+fail:
+       rcu_read_unlock();
+       return err;
+}
diff --git a/net/mac802154/llsec.h b/net/mac802154/llsec.h
new file mode 100644 (file)
index 0000000..950578e
--- /dev/null
@@ -0,0 +1,108 @@
+/*
+ * Copyright (C) 2014 Fraunhofer ITWM
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * Written by:
+ * Phoebe Buckheister <phoebe.buckheister@itwm.fraunhofer.de>
+ */
+
+#ifndef MAC802154_LLSEC_H
+#define MAC802154_LLSEC_H
+
+#include <linux/slab.h>
+#include <linux/hashtable.h>
+#include <linux/crypto.h>
+#include <linux/kref.h>
+#include <linux/spinlock.h>
+#include <net/af_ieee802154.h>
+#include <net/ieee802154_netdev.h>
+
+/* An llsec key with its crypto transforms preallocated: one AEAD per
+ * supported tag size plus the cipher for the encryption-only level.
+ * Reference counted; freed when the last user drops its reference.
+ */
+struct mac802154_llsec_key {
+       struct ieee802154_llsec_key key;
+
+       /* one tfm for each authsize (4/8/16) */
+       struct crypto_aead *tfm[3];
+       /* cipher used for the encryption-only security level */
+       struct crypto_blkcipher *tfm0;
+
+       struct kref ref;
+};
+
+/* Per-device key record; RCU-freed list member of a device's key list. */
+struct mac802154_llsec_device_key {
+       struct ieee802154_llsec_device_key devkey;
+
+       struct rcu_head rcu;
+};
+
+/* A known peer device, hashed by both its short and extended address. */
+struct mac802154_llsec_device {
+       struct ieee802154_llsec_device dev;
+
+       struct hlist_node bucket_s;
+       struct hlist_node bucket_hw;
+
+       /* protects dev.frame_counter and the elements of dev.keys */
+       spinlock_t lock;
+
+       struct rcu_head rcu;
+};
+
+/* Security-level descriptor; RCU-freed list member. */
+struct mac802154_llsec_seclevel {
+       struct ieee802154_llsec_seclevel level;
+
+       struct rcu_head rcu;
+};
+
+/* Per-interface llsec state: parameters, key/device/seclevel tables,
+ * and device lookup hashes keyed by short and hardware address.
+ */
+struct mac802154_llsec {
+       struct ieee802154_llsec_params params;
+       struct ieee802154_llsec_table table;
+
+       DECLARE_HASHTABLE(devices_short, 6);
+       DECLARE_HASHTABLE(devices_hw, 6);
+
+       /* protects params, all other fields are fine with RCU */
+       rwlock_t lock;
+};
+
+void mac802154_llsec_init(struct mac802154_llsec *sec);
+void mac802154_llsec_destroy(struct mac802154_llsec *sec);
+
+int mac802154_llsec_get_params(struct mac802154_llsec *sec,
+                              struct ieee802154_llsec_params *params);
+int mac802154_llsec_set_params(struct mac802154_llsec *sec,
+                              const struct ieee802154_llsec_params *params,
+                              int changed);
+
+int mac802154_llsec_key_add(struct mac802154_llsec *sec,
+                           const struct ieee802154_llsec_key_id *id,
+                           const struct ieee802154_llsec_key *key);
+int mac802154_llsec_key_del(struct mac802154_llsec *sec,
+                           const struct ieee802154_llsec_key_id *key);
+
+int mac802154_llsec_dev_add(struct mac802154_llsec *sec,
+                           const struct ieee802154_llsec_device *dev);
+int mac802154_llsec_dev_del(struct mac802154_llsec *sec,
+                           __le64 device_addr);
+
+int mac802154_llsec_devkey_add(struct mac802154_llsec *sec,
+                              __le64 dev_addr,
+                              const struct ieee802154_llsec_device_key *key);
+int mac802154_llsec_devkey_del(struct mac802154_llsec *sec,
+                              __le64 dev_addr,
+                              const struct ieee802154_llsec_device_key *key);
+
+int mac802154_llsec_seclevel_add(struct mac802154_llsec *sec,
+                                const struct ieee802154_llsec_seclevel *sl);
+int mac802154_llsec_seclevel_del(struct mac802154_llsec *sec,
+                                const struct ieee802154_llsec_seclevel *sl);
+
+int mac802154_llsec_encrypt(struct mac802154_llsec *sec, struct sk_buff *skb);
+int mac802154_llsec_decrypt(struct mac802154_llsec *sec, struct sk_buff *skb);
+
+#endif /* MAC802154_LLSEC_H */
index 28ef59c566e6ee78df73edd1246d84d4c2c4d912..762a6f849c6b7d3edf3e8677ee6f2081c10c8fac 100644 (file)
 #ifndef MAC802154_H
 #define MAC802154_H
 
+#include <linux/mutex.h>
+#include <net/mac802154.h>
 #include <net/ieee802154_netdev.h>
 
+#include "llsec.h"
+
 /* mac802154 device private data */
 struct mac802154_priv {
        struct ieee802154_dev hw;
@@ -90,6 +94,13 @@ struct mac802154_sub_if_data {
        u8 bsn;
        /* MAC DSN field */
        u8 dsn;
+
+       /* protects sec from concurrent access by netlink. access by
+        * encrypt/decrypt/header_create safe without additional protection.
+        */
+       struct mutex sec_mtx;
+
+       struct mac802154_llsec sec;
 };
 
 #define mac802154_to_priv(_hw) container_of(_hw, struct mac802154_priv, hw)
@@ -125,4 +136,37 @@ int mac802154_set_mac_params(struct net_device *dev,
 void mac802154_get_mac_params(struct net_device *dev,
                              struct ieee802154_mac_params *params);
 
+int mac802154_get_params(struct net_device *dev,
+                        struct ieee802154_llsec_params *params);
+int mac802154_set_params(struct net_device *dev,
+                        const struct ieee802154_llsec_params *params,
+                        int changed);
+
+int mac802154_add_key(struct net_device *dev,
+                     const struct ieee802154_llsec_key_id *id,
+                     const struct ieee802154_llsec_key *key);
+int mac802154_del_key(struct net_device *dev,
+                     const struct ieee802154_llsec_key_id *id);
+
+int mac802154_add_dev(struct net_device *dev,
+                     const struct ieee802154_llsec_device *llsec_dev);
+int mac802154_del_dev(struct net_device *dev, __le64 dev_addr);
+
+int mac802154_add_devkey(struct net_device *dev,
+                        __le64 device_addr,
+                        const struct ieee802154_llsec_device_key *key);
+int mac802154_del_devkey(struct net_device *dev,
+                        __le64 device_addr,
+                        const struct ieee802154_llsec_device_key *key);
+
+int mac802154_add_seclevel(struct net_device *dev,
+                          const struct ieee802154_llsec_seclevel *sl);
+int mac802154_del_seclevel(struct net_device *dev,
+                          const struct ieee802154_llsec_seclevel *sl);
+
+void mac802154_lock_table(struct net_device *dev);
+void mac802154_get_table(struct net_device *dev,
+                        struct ieee802154_llsec_table **t);
+void mac802154_unlock_table(struct net_device *dev);
+
 #endif /* MAC802154_H */
index d40c0928bc622d5802c9dcd1bdb740150ee06744..bf809131eef776209040a8496ed1e2214e6bdebe 100644 (file)
@@ -40,6 +40,9 @@ static int mac802154_mlme_start_req(struct net_device *dev,
                                    u8 pan_coord, u8 blx,
                                    u8 coord_realign)
 {
+       struct ieee802154_mlme_ops *ops = ieee802154_mlme_ops(dev);
+       int rc = 0;
+
        BUG_ON(addr->mode != IEEE802154_ADDR_SHORT);
 
        mac802154_dev_set_pan_id(dev, addr->pan_id);
@@ -47,12 +50,31 @@ static int mac802154_mlme_start_req(struct net_device *dev,
        mac802154_dev_set_ieee_addr(dev);
        mac802154_dev_set_page_channel(dev, page, channel);
 
+       if (ops->llsec) {
+               struct ieee802154_llsec_params params;
+               int changed = 0;
+
+               params.coord_shortaddr = addr->short_addr;
+               changed |= IEEE802154_LLSEC_PARAM_COORD_SHORTADDR;
+
+               params.pan_id = addr->pan_id;
+               changed |= IEEE802154_LLSEC_PARAM_PAN_ID;
+
+               params.hwaddr = ieee802154_devaddr_from_raw(dev->dev_addr);
+               changed |= IEEE802154_LLSEC_PARAM_HWADDR;
+
+               params.coord_hwaddr = params.hwaddr;
+               changed |= IEEE802154_LLSEC_PARAM_COORD_HWADDR;
+
+               rc = ops->llsec->set_params(dev, &params, changed);
+       }
+
        /* FIXME: add validation for unused parameters to be sane
         * for SoftMAC
         */
        ieee802154_nl_start_confirm(dev, IEEE802154_SUCCESS);
 
-       return 0;
+       return rc;
 }
 
 static struct wpan_phy *mac802154_get_phy(const struct net_device *dev)
@@ -64,6 +86,22 @@ static struct wpan_phy *mac802154_get_phy(const struct net_device *dev)
        return to_phy(get_device(&priv->hw->phy->dev));
 }
 
+static struct ieee802154_llsec_ops mac802154_llsec_ops = {
+       .get_params = mac802154_get_params,
+       .set_params = mac802154_set_params,
+       .add_key = mac802154_add_key,
+       .del_key = mac802154_del_key,
+       .add_dev = mac802154_add_dev,
+       .del_dev = mac802154_del_dev,
+       .add_devkey = mac802154_add_devkey,
+       .del_devkey = mac802154_del_devkey,
+       .add_seclevel = mac802154_add_seclevel,
+       .del_seclevel = mac802154_del_seclevel,
+       .lock_table = mac802154_lock_table,
+       .get_table = mac802154_get_table,
+       .unlock_table = mac802154_unlock_table,
+};
+
 struct ieee802154_reduced_mlme_ops mac802154_mlme_reduced = {
        .get_phy = mac802154_get_phy,
 };
@@ -75,6 +113,8 @@ struct ieee802154_mlme_ops mac802154_mlme_wpan = {
        .get_short_addr = mac802154_dev_get_short_addr,
        .get_dsn = mac802154_dev_get_dsn,
 
+       .llsec = &mac802154_llsec_ops,
+
        .set_mac_params = mac802154_set_mac_params,
        .get_mac_params = mac802154_get_mac_params,
 };
index f0991f2344d403f3b1ed3a1f44fc2aebf50c72b4..15aa2f2b03a78c29138db43c08073c4ba2817e54 100644 (file)
@@ -213,3 +213,190 @@ void mac802154_dev_set_page_channel(struct net_device *dev, u8 page, u8 chan)
        } else
                mutex_unlock(&priv->hw->phy->pib_lock);
 }
+
+
+/* ieee802154_llsec_ops backends: thin wrappers that assert the device
+ * type and forward to the mac802154_llsec core while holding the
+ * per-interface sec_mtx to serialize netlink access.
+ */
+int mac802154_get_params(struct net_device *dev,
+                        struct ieee802154_llsec_params *params)
+{
+       struct mac802154_sub_if_data *priv = netdev_priv(dev);
+       int res;
+
+       BUG_ON(dev->type != ARPHRD_IEEE802154);
+
+       mutex_lock(&priv->sec_mtx);
+       res = mac802154_llsec_get_params(&priv->sec, params);
+       mutex_unlock(&priv->sec_mtx);
+
+       return res;
+}
+
+int mac802154_set_params(struct net_device *dev,
+                        const struct ieee802154_llsec_params *params,
+                        int changed)
+{
+       struct mac802154_sub_if_data *priv = netdev_priv(dev);
+       int res;
+
+       BUG_ON(dev->type != ARPHRD_IEEE802154);
+
+       mutex_lock(&priv->sec_mtx);
+       res = mac802154_llsec_set_params(&priv->sec, params, changed);
+       mutex_unlock(&priv->sec_mtx);
+
+       return res;
+}
+
+
+int mac802154_add_key(struct net_device *dev,
+                     const struct ieee802154_llsec_key_id *id,
+                     const struct ieee802154_llsec_key *key)
+{
+       struct mac802154_sub_if_data *priv = netdev_priv(dev);
+       int res;
+
+       BUG_ON(dev->type != ARPHRD_IEEE802154);
+
+       mutex_lock(&priv->sec_mtx);
+       res = mac802154_llsec_key_add(&priv->sec, id, key);
+       mutex_unlock(&priv->sec_mtx);
+
+       return res;
+}
+
+int mac802154_del_key(struct net_device *dev,
+                     const struct ieee802154_llsec_key_id *id)
+{
+       struct mac802154_sub_if_data *priv = netdev_priv(dev);
+       int res;
+
+       BUG_ON(dev->type != ARPHRD_IEEE802154);
+
+       mutex_lock(&priv->sec_mtx);
+       res = mac802154_llsec_key_del(&priv->sec, id);
+       mutex_unlock(&priv->sec_mtx);
+
+       return res;
+}
+
+
+int mac802154_add_dev(struct net_device *dev,
+                     const struct ieee802154_llsec_device *llsec_dev)
+{
+       struct mac802154_sub_if_data *priv = netdev_priv(dev);
+       int res;
+
+       BUG_ON(dev->type != ARPHRD_IEEE802154);
+
+       mutex_lock(&priv->sec_mtx);
+       res = mac802154_llsec_dev_add(&priv->sec, llsec_dev);
+       mutex_unlock(&priv->sec_mtx);
+
+       return res;
+}
+
+int mac802154_del_dev(struct net_device *dev, __le64 dev_addr)
+{
+       struct mac802154_sub_if_data *priv = netdev_priv(dev);
+       int res;
+
+       BUG_ON(dev->type != ARPHRD_IEEE802154);
+
+       mutex_lock(&priv->sec_mtx);
+       res = mac802154_llsec_dev_del(&priv->sec, dev_addr);
+       mutex_unlock(&priv->sec_mtx);
+
+       return res;
+}
+
+
+int mac802154_add_devkey(struct net_device *dev,
+                        __le64 device_addr,
+                        const struct ieee802154_llsec_device_key *key)
+{
+       struct mac802154_sub_if_data *priv = netdev_priv(dev);
+       int res;
+
+       BUG_ON(dev->type != ARPHRD_IEEE802154);
+
+       mutex_lock(&priv->sec_mtx);
+       res = mac802154_llsec_devkey_add(&priv->sec, device_addr, key);
+       mutex_unlock(&priv->sec_mtx);
+
+       return res;
+}
+
+int mac802154_del_devkey(struct net_device *dev,
+                        __le64 device_addr,
+                        const struct ieee802154_llsec_device_key *key)
+{
+       struct mac802154_sub_if_data *priv = netdev_priv(dev);
+       int res;
+
+       BUG_ON(dev->type != ARPHRD_IEEE802154);
+
+       mutex_lock(&priv->sec_mtx);
+       res = mac802154_llsec_devkey_del(&priv->sec, device_addr, key);
+       mutex_unlock(&priv->sec_mtx);
+
+       return res;
+}
+
+
+int mac802154_add_seclevel(struct net_device *dev,
+                          const struct ieee802154_llsec_seclevel *sl)
+{
+       struct mac802154_sub_if_data *priv = netdev_priv(dev);
+       int res;
+
+       BUG_ON(dev->type != ARPHRD_IEEE802154);
+
+       mutex_lock(&priv->sec_mtx);
+       res = mac802154_llsec_seclevel_add(&priv->sec, sl);
+       mutex_unlock(&priv->sec_mtx);
+
+       return res;
+}
+
+int mac802154_del_seclevel(struct net_device *dev,
+                          const struct ieee802154_llsec_seclevel *sl)
+{
+       struct mac802154_sub_if_data *priv = netdev_priv(dev);
+       int res;
+
+       BUG_ON(dev->type != ARPHRD_IEEE802154);
+
+       mutex_lock(&priv->sec_mtx);
+       res = mac802154_llsec_seclevel_del(&priv->sec, sl);
+       mutex_unlock(&priv->sec_mtx);
+
+       return res;
+}
+
+
+/* The table accessors below do not take the mutex themselves; callers
+ * are expected to bracket mac802154_get_table() with lock_table() /
+ * unlock_table().
+ */
+void mac802154_lock_table(struct net_device *dev)
+{
+       struct mac802154_sub_if_data *priv = netdev_priv(dev);
+
+       BUG_ON(dev->type != ARPHRD_IEEE802154);
+
+       mutex_lock(&priv->sec_mtx);
+}
+
+void mac802154_get_table(struct net_device *dev,
+                        struct ieee802154_llsec_table **t)
+{
+       struct mac802154_sub_if_data *priv = netdev_priv(dev);
+
+       BUG_ON(dev->type != ARPHRD_IEEE802154);
+
+       *t = &priv->sec.table;
+}
+
+void mac802154_unlock_table(struct net_device *dev)
+{
+       struct mac802154_sub_if_data *priv = netdev_priv(dev);
+
+       BUG_ON(dev->type != ARPHRD_IEEE802154);
+
+       mutex_unlock(&priv->sec_mtx);
+}
index 03855b0677ccf8efcb0819591bae63bd8609c693..0597b96dc9bac1e9150864d4e87c9566c60c149b 100644 (file)
@@ -59,8 +59,6 @@ mac802154_subif_rx(struct ieee802154_dev *hw, struct sk_buff *skb, u8 lqi)
        skb->protocol = htons(ETH_P_IEEE802154);
        skb_reset_mac_header(skb);
 
-       BUILD_BUG_ON(sizeof(struct ieee802154_mac_cb) > sizeof(skb->cb));
-
        if (!(priv->hw.flags & IEEE802154_HW_OMIT_CKSUM)) {
                u16 crc;
 
index 1df7a6a573865b4add87261300cc1d7ff01fa243..23bc91cf99c465232b0b2406f29a9bcd2b539b98 100644 (file)
 
 #include "mac802154.h"
 
+static int mac802154_wpan_update_llsec(struct net_device *dev)
+{
+       struct mac802154_sub_if_data *priv = netdev_priv(dev);
+       struct ieee802154_mlme_ops *ops = ieee802154_mlme_ops(dev);
+       int rc = 0;
+
+       if (ops->llsec) {
+               struct ieee802154_llsec_params params;
+               int changed = 0;
+
+               params.pan_id = priv->pan_id;
+               changed |= IEEE802154_LLSEC_PARAM_PAN_ID;
+
+               params.hwaddr = priv->extended_addr;
+               changed |= IEEE802154_LLSEC_PARAM_HWADDR;
+
+               rc = ops->llsec->set_params(dev, &params, changed);
+       }
+
+       return rc;
+}
+
 static int
 mac802154_wpan_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
 {
@@ -81,7 +103,7 @@ mac802154_wpan_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
                priv->pan_id = cpu_to_le16(sa->addr.pan_id);
                priv->short_addr = cpu_to_le16(sa->addr.short_addr);
 
-               err = 0;
+               err = mac802154_wpan_update_llsec(dev);
                break;
        }
 
@@ -99,7 +121,7 @@ static int mac802154_wpan_mac_addr(struct net_device *dev, void *p)
        /* FIXME: validate addr */
        memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
        mac802154_dev_set_ieee_addr(dev);
-       return 0;
+       return mac802154_wpan_update_llsec(dev);
 }
 
 int mac802154_set_mac_params(struct net_device *dev,
@@ -124,7 +146,7 @@ void mac802154_get_mac_params(struct net_device *dev,
        mutex_unlock(&priv->hw->slaves_mtx);
 }
 
-int mac802154_wpan_open(struct net_device *dev)
+static int mac802154_wpan_open(struct net_device *dev)
 {
        int rc;
        struct mac802154_sub_if_data *priv = netdev_priv(dev);
@@ -183,6 +205,38 @@ out:
        return rc;
 }
 
+static int mac802154_set_header_security(struct mac802154_sub_if_data *priv,
+                                        struct ieee802154_hdr *hdr,
+                                        const struct ieee802154_mac_cb *cb)
+{
+       struct ieee802154_llsec_params params;
+       u8 level;
+
+       mac802154_llsec_get_params(&priv->sec, &params);
+
+       if (!params.enabled && cb->secen_override && cb->secen)
+               return -EINVAL;
+       if (!params.enabled ||
+           (cb->secen_override && !cb->secen) ||
+           !params.out_level)
+               return 0;
+       if (cb->seclevel_override && !cb->seclevel)
+               return -EINVAL;
+
+       level = cb->seclevel_override ? cb->seclevel : params.out_level;
+
+       hdr->fc.security_enabled = 1;
+       hdr->sec.level = level;
+       hdr->sec.key_id_mode = params.out_key.mode;
+       if (params.out_key.mode == IEEE802154_SCF_KEY_SHORT_INDEX)
+               hdr->sec.short_src = params.out_key.short_source;
+       else if (params.out_key.mode == IEEE802154_SCF_KEY_HW_INDEX)
+               hdr->sec.extended_src = params.out_key.extended_source;
+       hdr->sec.key_id = params.out_key.id;
+
+       return 0;
+}
+
 static int mac802154_header_create(struct sk_buff *skb,
                                   struct net_device *dev,
                                   unsigned short type,
@@ -192,15 +246,20 @@ static int mac802154_header_create(struct sk_buff *skb,
 {
        struct ieee802154_hdr hdr;
        struct mac802154_sub_if_data *priv = netdev_priv(dev);
+       struct ieee802154_mac_cb *cb = mac_cb(skb);
        int hlen;
 
        if (!daddr)
                return -EINVAL;
 
        memset(&hdr.fc, 0, sizeof(hdr.fc));
-       hdr.fc.type = mac_cb_type(skb);
-       hdr.fc.security_enabled = mac_cb_is_secen(skb);
-       hdr.fc.ack_request = mac_cb_is_ackreq(skb);
+       hdr.fc.type = cb->type;
+       hdr.fc.security_enabled = cb->secen;
+       hdr.fc.ack_request = cb->ackreq;
+       hdr.seq = ieee802154_mlme_ops(dev)->get_dsn(dev);
+
+       if (mac802154_set_header_security(priv, &hdr, cb) < 0)
+               return -EINVAL;
 
        if (!saddr) {
                spin_lock_bh(&priv->mib_lock);
@@ -231,7 +290,7 @@ static int mac802154_header_create(struct sk_buff *skb,
        skb_reset_mac_header(skb);
        skb->mac_len = hlen;
 
-       if (hlen + len + 2 > dev->mtu)
+       if (len > ieee802154_max_payload(&hdr))
                return -EMSGSIZE;
 
        return hlen;
@@ -257,6 +316,7 @@ mac802154_wpan_xmit(struct sk_buff *skb, struct net_device *dev)
 {
        struct mac802154_sub_if_data *priv;
        u8 chan, page;
+       int rc;
 
        priv = netdev_priv(dev);
 
@@ -272,6 +332,13 @@ mac802154_wpan_xmit(struct sk_buff *skb, struct net_device *dev)
                return NETDEV_TX_OK;
        }
 
+       rc = mac802154_llsec_encrypt(&priv->sec, skb);
+       if (rc) {
+               pr_warn("encryption failed: %i\n", rc);
+               kfree_skb(skb);
+               return NETDEV_TX_OK;
+       }
+
        skb->skb_iif = dev->ifindex;
        dev->stats.tx_packets++;
        dev->stats.tx_bytes += skb->len;
@@ -292,6 +359,15 @@ static const struct net_device_ops mac802154_wpan_ops = {
        .ndo_set_mac_address    = mac802154_wpan_mac_addr,
 };
 
+static void mac802154_wpan_free(struct net_device *dev)
+{
+       struct mac802154_sub_if_data *priv = netdev_priv(dev);
+
+       mac802154_llsec_destroy(&priv->sec);
+
+       free_netdev(dev);
+}
+
 void mac802154_wpan_setup(struct net_device *dev)
 {
        struct mac802154_sub_if_data *priv;
@@ -301,14 +377,14 @@ void mac802154_wpan_setup(struct net_device *dev)
 
        dev->hard_header_len    = MAC802154_FRAME_HARD_HEADER_LEN;
        dev->header_ops         = &mac802154_header_ops;
-       dev->needed_tailroom    = 2; /* FCS */
+       dev->needed_tailroom    = 2 + 16; /* FCS + MIC */
        dev->mtu                = IEEE802154_MTU;
        dev->tx_queue_len       = 300;
        dev->type               = ARPHRD_IEEE802154;
        dev->flags              = IFF_NOARP | IFF_BROADCAST;
        dev->watchdog_timeo     = 0;
 
-       dev->destructor         = free_netdev;
+       dev->destructor         = mac802154_wpan_free;
        dev->netdev_ops         = &mac802154_wpan_ops;
        dev->ml_priv            = &mac802154_mlme_wpan;
 
@@ -319,6 +395,7 @@ void mac802154_wpan_setup(struct net_device *dev)
        priv->page = 0;
 
        spin_lock_init(&priv->mib_lock);
+       mutex_init(&priv->sec_mtx);
 
        get_random_bytes(&priv->bsn, 1);
        get_random_bytes(&priv->dsn, 1);
@@ -331,6 +408,8 @@ void mac802154_wpan_setup(struct net_device *dev)
 
        priv->pan_id = cpu_to_le16(IEEE802154_PANID_BROADCAST);
        priv->short_addr = cpu_to_le16(IEEE802154_ADDR_BROADCAST);
+
+       mac802154_llsec_init(&priv->sec);
 }
 
 static int mac802154_process_data(struct net_device *dev, struct sk_buff *skb)
@@ -339,9 +418,11 @@ static int mac802154_process_data(struct net_device *dev, struct sk_buff *skb)
 }
 
 static int
-mac802154_subif_frame(struct mac802154_sub_if_data *sdata, struct sk_buff *skb)
+mac802154_subif_frame(struct mac802154_sub_if_data *sdata, struct sk_buff *skb,
+                     const struct ieee802154_hdr *hdr)
 {
        __le16 span, sshort;
+       int rc;
 
        pr_debug("getting packet via slave interface %s\n", sdata->dev->name);
 
@@ -388,15 +469,21 @@ mac802154_subif_frame(struct mac802154_sub_if_data *sdata, struct sk_buff *skb)
 
        skb->dev = sdata->dev;
 
+       rc = mac802154_llsec_decrypt(&sdata->sec, skb);
+       if (rc) {
+               pr_debug("decryption failed: %i\n", rc);
+               return NET_RX_DROP;
+       }
+
        sdata->dev->stats.rx_packets++;
        sdata->dev->stats.rx_bytes += skb->len;
 
-       switch (mac_cb_type(skb)) {
+       switch (mac_cb(skb)->type) {
        case IEEE802154_FC_TYPE_DATA:
                return mac802154_process_data(sdata->dev, skb);
        default:
                pr_warn("ieee802154: bad frame received (type = %d)\n",
-                       mac_cb_type(skb));
+                       mac_cb(skb)->type);
                kfree_skb(skb);
                return NET_RX_DROP;
        }
@@ -419,62 +506,58 @@ static void mac802154_print_addr(const char *name,
        }
 }
 
-static int mac802154_parse_frame_start(struct sk_buff *skb)
+static int mac802154_parse_frame_start(struct sk_buff *skb,
+                                      struct ieee802154_hdr *hdr)
 {
        int hlen;
-       struct ieee802154_hdr hdr;
+       struct ieee802154_mac_cb *cb = mac_cb_init(skb);
 
-       hlen = ieee802154_hdr_pull(skb, &hdr);
+       hlen = ieee802154_hdr_pull(skb, hdr);
        if (hlen < 0)
                return -EINVAL;
 
        skb->mac_len = hlen;
 
-       pr_debug("fc: %04x dsn: %02x\n", le16_to_cpup((__le16 *)&hdr.fc),
-                hdr.seq);
-
-       mac_cb(skb)->flags = hdr.fc.type;
+       pr_debug("fc: %04x dsn: %02x\n", le16_to_cpup((__le16 *)&hdr->fc),
+                hdr->seq);
 
-       if (hdr.fc.ack_request)
-               mac_cb(skb)->flags |= MAC_CB_FLAG_ACKREQ;
-       if (hdr.fc.security_enabled)
-               mac_cb(skb)->flags |= MAC_CB_FLAG_SECEN;
+       cb->type = hdr->fc.type;
+       cb->ackreq = hdr->fc.ack_request;
+       cb->secen = hdr->fc.security_enabled;
 
-       mac802154_print_addr("destination", &hdr.dest);
-       mac802154_print_addr("source", &hdr.source);
+       mac802154_print_addr("destination", &hdr->dest);
+       mac802154_print_addr("source", &hdr->source);
 
-       mac_cb(skb)->source = hdr.source;
-       mac_cb(skb)->dest = hdr.dest;
+       cb->source = hdr->source;
+       cb->dest = hdr->dest;
 
-       if (hdr.fc.security_enabled) {
+       if (hdr->fc.security_enabled) {
                u64 key;
 
-               pr_debug("seclevel %i\n", hdr.sec.level);
+               pr_debug("seclevel %i\n", hdr->sec.level);
 
-               switch (hdr.sec.key_id_mode) {
+               switch (hdr->sec.key_id_mode) {
                case IEEE802154_SCF_KEY_IMPLICIT:
                        pr_debug("implicit key\n");
                        break;
 
                case IEEE802154_SCF_KEY_INDEX:
-                       pr_debug("key %02x\n", hdr.sec.key_id);
+                       pr_debug("key %02x\n", hdr->sec.key_id);
                        break;
 
                case IEEE802154_SCF_KEY_SHORT_INDEX:
                        pr_debug("key %04x:%04x %02x\n",
-                                le32_to_cpu(hdr.sec.short_src) >> 16,
-                                le32_to_cpu(hdr.sec.short_src) & 0xffff,
-                                hdr.sec.key_id);
+                                le32_to_cpu(hdr->sec.short_src) >> 16,
+                                le32_to_cpu(hdr->sec.short_src) & 0xffff,
+                                hdr->sec.key_id);
                        break;
 
                case IEEE802154_SCF_KEY_HW_INDEX:
-                       key = swab64((__force u64) hdr.sec.extended_src);
+                       key = swab64((__force u64) hdr->sec.extended_src);
                        pr_debug("key source %8phC %02x\n", &key,
-                                hdr.sec.key_id);
+                                hdr->sec.key_id);
                        break;
                }
-
-               return -EINVAL;
        }
 
        return 0;
@@ -485,8 +568,9 @@ void mac802154_wpans_rx(struct mac802154_priv *priv, struct sk_buff *skb)
        int ret;
        struct sk_buff *sskb;
        struct mac802154_sub_if_data *sdata;
+       struct ieee802154_hdr hdr;
 
-       ret = mac802154_parse_frame_start(skb);
+       ret = mac802154_parse_frame_start(skb, &hdr);
        if (ret) {
                pr_debug("got invalid frame\n");
                return;
@@ -499,7 +583,7 @@ void mac802154_wpans_rx(struct mac802154_priv *priv, struct sk_buff *skb)
 
                sskb = skb_clone(skb, GFP_ATOMIC);
                if (sskb)
-                       mac802154_subif_frame(sdata, sskb);
+                       mac802154_subif_frame(sdata, sskb, &hdr);
        }
        rcu_read_unlock();
 }
index c47444e4cf8ccc9977fa0622689b4fb55799ff4b..487b55e04337b3f83b76eb7faa0b5d4e909d41c5 100644 (file)
@@ -562,7 +562,7 @@ ip_vs_bypass_xmit(struct sk_buff *skb, struct ip_vs_conn *cp,
        ip_send_check(iph);
 
        /* Another hack: avoid icmp_send in ip_fragment */
-       skb->local_df = 1;
+       skb->ignore_df = 1;
 
        ip_vs_send_or_cont(NFPROTO_IPV4, skb, cp, 0);
        rcu_read_unlock();
@@ -590,7 +590,7 @@ ip_vs_bypass_xmit_v6(struct sk_buff *skb, struct ip_vs_conn *cp,
                goto tx_error;
 
        /* Another hack: avoid icmp_send in ip_fragment */
-       skb->local_df = 1;
+       skb->ignore_df = 1;
 
        ip_vs_send_or_cont(NFPROTO_IPV6, skb, cp, 0);
        rcu_read_unlock();
@@ -684,7 +684,7 @@ ip_vs_nat_xmit(struct sk_buff *skb, struct ip_vs_conn *cp,
           MTU problem. */
 
        /* Another hack: avoid icmp_send in ip_fragment */
-       skb->local_df = 1;
+       skb->ignore_df = 1;
 
        rc = ip_vs_nat_send_or_cont(NFPROTO_IPV4, skb, cp, local);
        rcu_read_unlock();
@@ -774,7 +774,7 @@ ip_vs_nat_xmit_v6(struct sk_buff *skb, struct ip_vs_conn *cp,
           MTU problem. */
 
        /* Another hack: avoid icmp_send in ip_fragment */
-       skb->local_df = 1;
+       skb->ignore_df = 1;
 
        rc = ip_vs_nat_send_or_cont(NFPROTO_IPV6, skb, cp, local);
        rcu_read_unlock();
@@ -886,7 +886,7 @@ ip_vs_tunnel_xmit(struct sk_buff *skb, struct ip_vs_conn *cp,
        ip_select_ident(skb, &rt->dst, NULL);
 
        /* Another hack: avoid icmp_send in ip_fragment */
-       skb->local_df = 1;
+       skb->ignore_df = 1;
 
        ret = ip_vs_tunnel_xmit_prepare(skb, cp);
        if (ret == NF_ACCEPT)
@@ -974,7 +974,7 @@ ip_vs_tunnel_xmit_v6(struct sk_buff *skb, struct ip_vs_conn *cp,
        iph->hop_limit          =       old_iph->hop_limit;
 
        /* Another hack: avoid icmp_send in ip_fragment */
-       skb->local_df = 1;
+       skb->ignore_df = 1;
 
        ret = ip_vs_tunnel_xmit_prepare(skb, cp);
        if (ret == NF_ACCEPT)
@@ -1023,7 +1023,7 @@ ip_vs_dr_xmit(struct sk_buff *skb, struct ip_vs_conn *cp,
        ip_send_check(ip_hdr(skb));
 
        /* Another hack: avoid icmp_send in ip_fragment */
-       skb->local_df = 1;
+       skb->ignore_df = 1;
 
        ip_vs_send_or_cont(NFPROTO_IPV4, skb, cp, 0);
        rcu_read_unlock();
@@ -1060,7 +1060,7 @@ ip_vs_dr_xmit_v6(struct sk_buff *skb, struct ip_vs_conn *cp,
        }
 
        /* Another hack: avoid icmp_send in ip_fragment */
-       skb->local_df = 1;
+       skb->ignore_df = 1;
 
        ip_vs_send_or_cont(NFPROTO_IPV6, skb, cp, 0);
        rcu_read_unlock();
@@ -1157,7 +1157,7 @@ ip_vs_icmp_xmit(struct sk_buff *skb, struct ip_vs_conn *cp,
        ip_vs_nat_icmp(skb, pp, cp, 0);
 
        /* Another hack: avoid icmp_send in ip_fragment */
-       skb->local_df = 1;
+       skb->ignore_df = 1;
 
        rc = ip_vs_nat_send_or_cont(NFPROTO_IPV4, skb, cp, local);
        rcu_read_unlock();
@@ -1249,7 +1249,7 @@ ip_vs_icmp_xmit_v6(struct sk_buff *skb, struct ip_vs_conn *cp,
        ip_vs_nat_icmp_v6(skb, pp, cp, 0);
 
        /* Another hack: avoid icmp_send in ip_fragment */
-       skb->local_df = 1;
+       skb->ignore_df = 1;
 
        rc = ip_vs_nat_send_or_cont(NFPROTO_IPV6, skb, cp, local);
        rcu_read_unlock();
index ccc46fa5edbce5e52710a22ae502e49a0f59e0a5..58579634427d2fcbf7f35556424a959697b8655e 100644 (file)
@@ -1336,6 +1336,9 @@ ctnetlink_setup_nat(struct nf_conn *ct, const struct nlattr * const cda[])
 #ifdef CONFIG_NF_NAT_NEEDED
        int ret;
 
+       if (!cda[CTA_NAT_DST] && !cda[CTA_NAT_SRC])
+               return 0;
+
        ret = ctnetlink_parse_nat_setup(ct, NF_NAT_MANIP_DST,
                                        cda[CTA_NAT_DST]);
        if (ret < 0)
index 3fd159db9f06bc31d3da81d8cb3e5e8f2bb3ebc2..047884776586db680a9b9547ceb94e9a2035bf9c 100644 (file)
@@ -88,6 +88,45 @@ nf_tables_afinfo_lookup(struct net *net, int family, bool autoload)
        return ERR_PTR(-EAFNOSUPPORT);
 }
 
+static void nft_ctx_init(struct nft_ctx *ctx,
+                        const struct sk_buff *skb,
+                        const struct nlmsghdr *nlh,
+                        struct nft_af_info *afi,
+                        struct nft_table *table,
+                        struct nft_chain *chain,
+                        const struct nlattr * const *nla)
+{
+       ctx->net        = sock_net(skb->sk);
+       ctx->afi        = afi;
+       ctx->table      = table;
+       ctx->chain      = chain;
+       ctx->nla        = nla;
+       ctx->portid     = NETLINK_CB(skb).portid;
+       ctx->report     = nlmsg_report(nlh);
+       ctx->seq        = nlh->nlmsg_seq;
+}
+
+static struct nft_trans *nft_trans_alloc(struct nft_ctx *ctx, int msg_type,
+                                        u32 size)
+{
+       struct nft_trans *trans;
+
+       trans = kzalloc(sizeof(struct nft_trans) + size, GFP_KERNEL);
+       if (trans == NULL)
+               return NULL;
+
+       trans->msg_type = msg_type;
+       trans->ctx      = *ctx;
+
+       return trans;
+}
+
+static void nft_trans_destroy(struct nft_trans *trans)
+{
+       list_del(&trans->list);
+       kfree(trans);
+}
+
 /*
  * Tables
  */
@@ -197,20 +236,13 @@ nla_put_failure:
        return -1;
 }
 
-static int nf_tables_table_notify(const struct sk_buff *oskb,
-                                 const struct nlmsghdr *nlh,
-                                 const struct nft_table *table,
-                                 int event, int family)
+static int nf_tables_table_notify(const struct nft_ctx *ctx, int event)
 {
        struct sk_buff *skb;
-       u32 portid = oskb ? NETLINK_CB(oskb).portid : 0;
-       u32 seq = nlh ? nlh->nlmsg_seq : 0;
-       struct net *net = oskb ? sock_net(oskb->sk) : &init_net;
-       bool report;
        int err;
 
-       report = nlh ? nlmsg_report(nlh) : false;
-       if (!report && !nfnetlink_has_listeners(net, NFNLGRP_NFTABLES))
+       if (!ctx->report &&
+           !nfnetlink_has_listeners(ctx->net, NFNLGRP_NFTABLES))
                return 0;
 
        err = -ENOBUFS;
@@ -218,18 +250,20 @@ static int nf_tables_table_notify(const struct sk_buff *oskb,
        if (skb == NULL)
                goto err;
 
-       err = nf_tables_fill_table_info(skb, portid, seq, event, 0,
-                                       family, table);
+       err = nf_tables_fill_table_info(skb, ctx->portid, ctx->seq, event, 0,
+                                       ctx->afi->family, ctx->table);
        if (err < 0) {
                kfree_skb(skb);
                goto err;
        }
 
-       err = nfnetlink_send(skb, net, portid, NFNLGRP_NFTABLES, report,
-                            GFP_KERNEL);
+       err = nfnetlink_send(skb, ctx->net, ctx->portid, NFNLGRP_NFTABLES,
+                            ctx->report, GFP_KERNEL);
 err:
-       if (err < 0)
-               nfnetlink_set_err(net, portid, NFNLGRP_NFTABLES, err);
+       if (err < 0) {
+               nfnetlink_set_err(ctx->net, ctx->portid, NFNLGRP_NFTABLES,
+                                 err);
+       }
        return err;
 }
 
@@ -269,6 +303,9 @@ done:
        return skb->len;
 }
 
+/* Internal table flags */
+#define NFT_TABLE_INACTIVE     (1 << 15)
+
 static int nf_tables_gettable(struct sock *nlsk, struct sk_buff *skb,
                              const struct nlmsghdr *nlh,
                              const struct nlattr * const nla[])
@@ -295,6 +332,8 @@ static int nf_tables_gettable(struct sock *nlsk, struct sk_buff *skb,
        table = nf_tables_table_lookup(afi, nla[NFTA_TABLE_NAME]);
        if (IS_ERR(table))
                return PTR_ERR(table);
+       if (table->flags & NFT_TABLE_INACTIVE)
+               return -ENOENT;
 
        skb2 = alloc_skb(NLMSG_GOODSIZE, GFP_KERNEL);
        if (!skb2)
@@ -343,7 +382,7 @@ err:
        return err;
 }
 
-static int nf_tables_table_disable(const struct nft_af_info *afi,
+static void nf_tables_table_disable(const struct nft_af_info *afi,
                                   struct nft_table *table)
 {
        struct nft_chain *chain;
@@ -353,45 +392,63 @@ static int nf_tables_table_disable(const struct nft_af_info *afi,
                        nf_unregister_hooks(nft_base_chain(chain)->ops,
                                            afi->nops);
        }
-
-       return 0;
 }
 
-static int nf_tables_updtable(struct sock *nlsk, struct sk_buff *skb,
-                             const struct nlmsghdr *nlh,
-                             const struct nlattr * const nla[],
-                             struct nft_af_info *afi, struct nft_table *table)
+static int nf_tables_updtable(struct nft_ctx *ctx)
 {
-       const struct nfgenmsg *nfmsg = nlmsg_data(nlh);
-       int family = nfmsg->nfgen_family, ret = 0;
+       struct nft_trans *trans;
+       u32 flags;
+       int ret = 0;
 
-       if (nla[NFTA_TABLE_FLAGS]) {
-               u32 flags;
+       if (!ctx->nla[NFTA_TABLE_FLAGS])
+               return 0;
 
-               flags = ntohl(nla_get_be32(nla[NFTA_TABLE_FLAGS]));
-               if (flags & ~NFT_TABLE_F_DORMANT)
-                       return -EINVAL;
+       flags = ntohl(nla_get_be32(ctx->nla[NFTA_TABLE_FLAGS]));
+       if (flags & ~NFT_TABLE_F_DORMANT)
+               return -EINVAL;
+
+       trans = nft_trans_alloc(ctx, NFT_MSG_NEWTABLE,
+                               sizeof(struct nft_trans_table));
+       if (trans == NULL)
+               return -ENOMEM;
 
-               if ((flags & NFT_TABLE_F_DORMANT) &&
-                   !(table->flags & NFT_TABLE_F_DORMANT)) {
-                       ret = nf_tables_table_disable(afi, table);
-                       if (ret >= 0)
-                               table->flags |= NFT_TABLE_F_DORMANT;
-               } else if (!(flags & NFT_TABLE_F_DORMANT) &&
-                          table->flags & NFT_TABLE_F_DORMANT) {
-                       ret = nf_tables_table_enable(afi, table);
-                       if (ret >= 0)
-                               table->flags &= ~NFT_TABLE_F_DORMANT;
+       if ((flags & NFT_TABLE_F_DORMANT) &&
+           !(ctx->table->flags & NFT_TABLE_F_DORMANT)) {
+               nft_trans_table_enable(trans) = false;
+       } else if (!(flags & NFT_TABLE_F_DORMANT) &&
+                  ctx->table->flags & NFT_TABLE_F_DORMANT) {
+               ret = nf_tables_table_enable(ctx->afi, ctx->table);
+               if (ret >= 0) {
+                       ctx->table->flags &= ~NFT_TABLE_F_DORMANT;
+                       nft_trans_table_enable(trans) = true;
                }
-               if (ret < 0)
-                       goto err;
        }
+       if (ret < 0)
+               goto err;
 
-       nf_tables_table_notify(skb, nlh, table, NFT_MSG_NEWTABLE, family);
+       nft_trans_table_update(trans) = true;
+       list_add_tail(&trans->list, &ctx->net->nft.commit_list);
+       return 0;
 err:
+       nft_trans_destroy(trans);
        return ret;
 }
 
+static int nft_trans_table_add(struct nft_ctx *ctx, int msg_type)
+{
+       struct nft_trans *trans;
+
+       trans = nft_trans_alloc(ctx, msg_type, sizeof(struct nft_trans_table));
+       if (trans == NULL)
+               return -ENOMEM;
+
+       if (msg_type == NFT_MSG_NEWTABLE)
+               ctx->table->flags |= NFT_TABLE_INACTIVE;
+
+       list_add_tail(&trans->list, &ctx->net->nft.commit_list);
+       return 0;
+}
+
 static int nf_tables_newtable(struct sock *nlsk, struct sk_buff *skb,
                              const struct nlmsghdr *nlh,
                              const struct nlattr * const nla[])
@@ -403,6 +460,8 @@ static int nf_tables_newtable(struct sock *nlsk, struct sk_buff *skb,
        struct net *net = sock_net(skb->sk);
        int family = nfmsg->nfgen_family;
        u32 flags = 0;
+       struct nft_ctx ctx;
+       int err;
 
        afi = nf_tables_afinfo_lookup(net, family, true);
        if (IS_ERR(afi))
@@ -417,11 +476,15 @@ static int nf_tables_newtable(struct sock *nlsk, struct sk_buff *skb,
        }
 
        if (table != NULL) {
+               if (table->flags & NFT_TABLE_INACTIVE)
+                       return -ENOENT;
                if (nlh->nlmsg_flags & NLM_F_EXCL)
                        return -EEXIST;
                if (nlh->nlmsg_flags & NLM_F_REPLACE)
                        return -EOPNOTSUPP;
-               return nf_tables_updtable(nlsk, skb, nlh, nla, afi, table);
+
+               nft_ctx_init(&ctx, skb, nlh, afi, table, NULL, nla);
+               return nf_tables_updtable(&ctx);
        }
 
        if (nla[NFTA_TABLE_FLAGS]) {
@@ -444,8 +507,14 @@ static int nf_tables_newtable(struct sock *nlsk, struct sk_buff *skb,
        INIT_LIST_HEAD(&table->sets);
        table->flags = flags;
 
+       nft_ctx_init(&ctx, skb, nlh, afi, table, NULL, nla);
+       err = nft_trans_table_add(&ctx, NFT_MSG_NEWTABLE);
+       if (err < 0) {
+               kfree(table);
+               module_put(afi->owner);
+               return err;
+       }
        list_add_tail(&table->list, &afi->tables);
-       nf_tables_table_notify(skb, nlh, table, NFT_MSG_NEWTABLE, family);
        return 0;
 }
 
@@ -457,7 +526,8 @@ static int nf_tables_deltable(struct sock *nlsk, struct sk_buff *skb,
        struct nft_af_info *afi;
        struct nft_table *table;
        struct net *net = sock_net(skb->sk);
-       int family = nfmsg->nfgen_family;
+       int family = nfmsg->nfgen_family, err;
+       struct nft_ctx ctx;
 
        afi = nf_tables_afinfo_lookup(net, family, false);
        if (IS_ERR(afi))
@@ -466,17 +536,27 @@ static int nf_tables_deltable(struct sock *nlsk, struct sk_buff *skb,
        table = nf_tables_table_lookup(afi, nla[NFTA_TABLE_NAME]);
        if (IS_ERR(table))
                return PTR_ERR(table);
+       if (table->flags & NFT_TABLE_INACTIVE)
+               return -ENOENT;
 
        if (!list_empty(&table->chains) || !list_empty(&table->sets))
                return -EBUSY;
 
+       nft_ctx_init(&ctx, skb, nlh, afi, table, NULL, nla);
+       err = nft_trans_table_add(&ctx, NFT_MSG_DELTABLE);
+       if (err < 0)
+               return err;
+
        list_del(&table->list);
-       nf_tables_table_notify(skb, nlh, table, NFT_MSG_DELTABLE, family);
-       kfree(table);
-       module_put(afi->owner);
        return 0;
 }
 
+static void nf_tables_table_destroy(struct nft_ctx *ctx)
+{
+       kfree(ctx->table);
+       module_put(ctx->afi->owner);
+}
+
 int nft_register_chain_type(const struct nf_chain_type *ctype)
 {
        int err = 0;
@@ -541,7 +621,7 @@ static const struct nla_policy nft_chain_policy[NFTA_CHAIN_MAX + 1] = {
                                    .len = NFT_CHAIN_MAXNAMELEN - 1 },
        [NFTA_CHAIN_HOOK]       = { .type = NLA_NESTED },
        [NFTA_CHAIN_POLICY]     = { .type = NLA_U32 },
-       [NFTA_CHAIN_TYPE]       = { .type = NLA_NUL_STRING },
+       [NFTA_CHAIN_TYPE]       = { .type = NLA_STRING },
        [NFTA_CHAIN_COUNTERS]   = { .type = NLA_NESTED },
 };
 
@@ -637,21 +717,13 @@ nla_put_failure:
        return -1;
 }
 
-static int nf_tables_chain_notify(const struct sk_buff *oskb,
-                                 const struct nlmsghdr *nlh,
-                                 const struct nft_table *table,
-                                 const struct nft_chain *chain,
-                                 int event, int family)
+static int nf_tables_chain_notify(const struct nft_ctx *ctx, int event)
 {
        struct sk_buff *skb;
-       u32 portid = oskb ? NETLINK_CB(oskb).portid : 0;
-       struct net *net = oskb ? sock_net(oskb->sk) : &init_net;
-       u32 seq = nlh ? nlh->nlmsg_seq : 0;
-       bool report;
        int err;
 
-       report = nlh ? nlmsg_report(nlh) : false;
-       if (!report && !nfnetlink_has_listeners(net, NFNLGRP_NFTABLES))
+       if (!ctx->report &&
+           !nfnetlink_has_listeners(ctx->net, NFNLGRP_NFTABLES))
                return 0;
 
        err = -ENOBUFS;
@@ -659,18 +731,21 @@ static int nf_tables_chain_notify(const struct sk_buff *oskb,
        if (skb == NULL)
                goto err;
 
-       err = nf_tables_fill_chain_info(skb, portid, seq, event, 0, family,
-                                       table, chain);
+       err = nf_tables_fill_chain_info(skb, ctx->portid, ctx->seq, event, 0,
+                                       ctx->afi->family, ctx->table,
+                                       ctx->chain);
        if (err < 0) {
                kfree_skb(skb);
                goto err;
        }
 
-       err = nfnetlink_send(skb, net, portid, NFNLGRP_NFTABLES, report,
-                            GFP_KERNEL);
+       err = nfnetlink_send(skb, ctx->net, ctx->portid, NFNLGRP_NFTABLES,
+                            ctx->report, GFP_KERNEL);
 err:
-       if (err < 0)
-               nfnetlink_set_err(net, portid, NFNLGRP_NFTABLES, err);
+       if (err < 0) {
+               nfnetlink_set_err(ctx->net, ctx->portid, NFNLGRP_NFTABLES,
+                                 err);
+       }
        return err;
 }
 
@@ -740,10 +815,14 @@ static int nf_tables_getchain(struct sock *nlsk, struct sk_buff *skb,
        table = nf_tables_table_lookup(afi, nla[NFTA_CHAIN_TABLE]);
        if (IS_ERR(table))
                return PTR_ERR(table);
+       if (table->flags & NFT_TABLE_INACTIVE)
+               return -ENOENT;
 
        chain = nf_tables_chain_lookup(table, nla[NFTA_CHAIN_NAME]);
        if (IS_ERR(chain))
                return PTR_ERR(chain);
+       if (chain->flags & NFT_CHAIN_INACTIVE)
+               return -ENOENT;
 
        skb2 = alloc_skb(NLMSG_GOODSIZE, GFP_KERNEL);
        if (!skb2)
@@ -767,8 +846,7 @@ static const struct nla_policy nft_counter_policy[NFTA_COUNTER_MAX + 1] = {
        [NFTA_COUNTER_BYTES]    = { .type = NLA_U64 },
 };
 
-static int
-nf_tables_counters(struct nft_base_chain *chain, const struct nlattr *attr)
+static struct nft_stats __percpu *nft_stats_alloc(const struct nlattr *attr)
 {
        struct nlattr *tb[NFTA_COUNTER_MAX+1];
        struct nft_stats __percpu *newstats;
@@ -777,14 +855,14 @@ nf_tables_counters(struct nft_base_chain *chain, const struct nlattr *attr)
 
        err = nla_parse_nested(tb, NFTA_COUNTER_MAX, attr, nft_counter_policy);
        if (err < 0)
-               return err;
+               return ERR_PTR(err);
 
        if (!tb[NFTA_COUNTER_BYTES] || !tb[NFTA_COUNTER_PACKETS])
-               return -EINVAL;
+               return ERR_PTR(-EINVAL);
 
        newstats = alloc_percpu(struct nft_stats);
        if (newstats == NULL)
-               return -ENOMEM;
+               return ERR_PTR(-ENOMEM);
 
        /* Restore old counters on this cpu, no problem. Per-cpu statistics
         * are not exposed to userspace.
@@ -793,6 +871,12 @@ nf_tables_counters(struct nft_base_chain *chain, const struct nlattr *attr)
        stats->bytes = be64_to_cpu(nla_get_be64(tb[NFTA_COUNTER_BYTES]));
        stats->pkts = be64_to_cpu(nla_get_be64(tb[NFTA_COUNTER_PACKETS]));
 
+       return newstats;
+}
+
+static void nft_chain_stats_replace(struct nft_base_chain *chain,
+                                   struct nft_stats __percpu *newstats)
+{
        if (chain->stats) {
                struct nft_stats __percpu *oldstats =
                                nft_dereference(chain->stats);
@@ -802,17 +886,43 @@ nf_tables_counters(struct nft_base_chain *chain, const struct nlattr *attr)
                free_percpu(oldstats);
        } else
                rcu_assign_pointer(chain->stats, newstats);
+}
+
+static int nft_trans_chain_add(struct nft_ctx *ctx, int msg_type)
+{
+       struct nft_trans *trans;
 
+       trans = nft_trans_alloc(ctx, msg_type, sizeof(struct nft_trans_chain));
+       if (trans == NULL)
+               return -ENOMEM;
+
+       if (msg_type == NFT_MSG_NEWCHAIN)
+               ctx->chain->flags |= NFT_CHAIN_INACTIVE;
+
+       list_add_tail(&trans->list, &ctx->net->nft.commit_list);
        return 0;
 }
 
+static void nf_tables_chain_destroy(struct nft_chain *chain)
+{
+       BUG_ON(chain->use > 0);
+
+       if (chain->flags & NFT_BASE_CHAIN) {
+               module_put(nft_base_chain(chain)->type->owner);
+               free_percpu(nft_base_chain(chain)->stats);
+               kfree(nft_base_chain(chain));
+       } else {
+               kfree(chain);
+       }
+}
+
 static int nf_tables_newchain(struct sock *nlsk, struct sk_buff *skb,
                              const struct nlmsghdr *nlh,
                              const struct nlattr * const nla[])
 {
        const struct nfgenmsg *nfmsg = nlmsg_data(nlh);
        const struct nlattr * uninitialized_var(name);
-       const struct nft_af_info *afi;
+       struct nft_af_info *afi;
        struct nft_table *table;
        struct nft_chain *chain;
        struct nft_base_chain *basechain = NULL;
@@ -822,8 +932,10 @@ static int nf_tables_newchain(struct sock *nlsk, struct sk_buff *skb,
        u8 policy = NF_ACCEPT;
        u64 handle = 0;
        unsigned int i;
+       struct nft_stats __percpu *stats;
        int err;
        bool create;
+       struct nft_ctx ctx;
 
        create = nlh->nlmsg_flags & NLM_F_CREATE ? true : false;
 
@@ -869,6 +981,11 @@ static int nf_tables_newchain(struct sock *nlsk, struct sk_buff *skb,
        }
 
        if (chain != NULL) {
+               struct nft_stats __percpu *stats = NULL;
+               struct nft_trans *trans;
+
+               if (chain->flags & NFT_CHAIN_INACTIVE)
+                       return -ENOENT;
                if (nlh->nlmsg_flags & NLM_F_EXCL)
                        return -EEXIST;
                if (nlh->nlmsg_flags & NLM_F_REPLACE)
@@ -882,19 +999,31 @@ static int nf_tables_newchain(struct sock *nlsk, struct sk_buff *skb,
                        if (!(chain->flags & NFT_BASE_CHAIN))
                                return -EOPNOTSUPP;
 
-                       err = nf_tables_counters(nft_base_chain(chain),
-                                                nla[NFTA_CHAIN_COUNTERS]);
-                       if (err < 0)
-                               return err;
+                       stats = nft_stats_alloc(nla[NFTA_CHAIN_COUNTERS]);
+                       if (IS_ERR(stats))
+                               return PTR_ERR(stats);
                }
 
-               if (nla[NFTA_CHAIN_POLICY])
-                       nft_base_chain(chain)->policy = policy;
+               nft_ctx_init(&ctx, skb, nlh, afi, table, chain, nla);
+               trans = nft_trans_alloc(&ctx, NFT_MSG_NEWCHAIN,
+                                       sizeof(struct nft_trans_chain));
+               if (trans == NULL)
+                       return -ENOMEM;
+
+               nft_trans_chain_stats(trans) = stats;
+               nft_trans_chain_update(trans) = true;
 
-               if (nla[NFTA_CHAIN_HANDLE] && name)
-                       nla_strlcpy(chain->name, name, NFT_CHAIN_MAXNAMELEN);
+               if (nla[NFTA_CHAIN_POLICY])
+                       nft_trans_chain_policy(trans) = policy;
+               else
+                       nft_trans_chain_policy(trans) = -1;
 
-               goto notify;
+               if (nla[NFTA_CHAIN_HANDLE] && name) {
+                       nla_strlcpy(nft_trans_chain_name(trans), name,
+                                   NFT_CHAIN_MAXNAMELEN);
+               }
+               list_add_tail(&trans->list, &net->nft.commit_list);
+               return 0;
        }
 
        if (table->use == UINT_MAX)
@@ -939,23 +1068,21 @@ static int nf_tables_newchain(struct sock *nlsk, struct sk_buff *skb,
                        return -ENOMEM;
 
                if (nla[NFTA_CHAIN_COUNTERS]) {
-                       err = nf_tables_counters(basechain,
-                                                nla[NFTA_CHAIN_COUNTERS]);
-                       if (err < 0) {
+                       stats = nft_stats_alloc(nla[NFTA_CHAIN_COUNTERS]);
+                       if (IS_ERR(stats)) {
                                module_put(type->owner);
                                kfree(basechain);
-                               return err;
+                               return PTR_ERR(stats);
                        }
+                       basechain->stats = stats;
                } else {
-                       struct nft_stats __percpu *newstats;
-
-                       newstats = alloc_percpu(struct nft_stats);
-                       if (newstats == NULL) {
+                       stats = alloc_percpu(struct nft_stats);
+                       if (stats == NULL) {
                                module_put(type->owner);
                                kfree(basechain);
-                               return -ENOMEM;
+                               return -ENOMEM;
                        }
-                       rcu_assign_pointer(basechain->stats, newstats);
+                       rcu_assign_pointer(basechain->stats, stats);
                }
 
                basechain->type = type;
@@ -992,31 +1119,26 @@ static int nf_tables_newchain(struct sock *nlsk, struct sk_buff *skb,
        if (!(table->flags & NFT_TABLE_F_DORMANT) &&
            chain->flags & NFT_BASE_CHAIN) {
                err = nf_register_hooks(nft_base_chain(chain)->ops, afi->nops);
-               if (err < 0) {
-                       module_put(basechain->type->owner);
-                       free_percpu(basechain->stats);
-                       kfree(basechain);
-                       return err;
-               }
+               if (err < 0)
+                       goto err1;
        }
-       list_add_tail(&chain->list, &table->chains);
-       table->use++;
-notify:
-       nf_tables_chain_notify(skb, nlh, table, chain, NFT_MSG_NEWCHAIN,
-                              family);
-       return 0;
-}
 
-static void nf_tables_chain_destroy(struct nft_chain *chain)
-{
-       BUG_ON(chain->use > 0);
+       nft_ctx_init(&ctx, skb, nlh, afi, table, chain, nla);
+       err = nft_trans_chain_add(&ctx, NFT_MSG_NEWCHAIN);
+       if (err < 0)
+               goto err2;
 
-       if (chain->flags & NFT_BASE_CHAIN) {
-               module_put(nft_base_chain(chain)->type->owner);
-               free_percpu(nft_base_chain(chain)->stats);
-               kfree(nft_base_chain(chain));
-       } else
-               kfree(chain);
+       list_add_tail(&chain->list, &table->chains);
+       return 0;
+err2:
+       if (!(table->flags & NFT_TABLE_F_DORMANT) &&
+           chain->flags & NFT_BASE_CHAIN) {
+               nf_unregister_hooks(nft_base_chain(chain)->ops,
+                                   afi->nops);
+       }
+err1:
+       nf_tables_chain_destroy(chain);
+       return err;
 }
 
 static int nf_tables_delchain(struct sock *nlsk, struct sk_buff *skb,
@@ -1024,11 +1146,13 @@ static int nf_tables_delchain(struct sock *nlsk, struct sk_buff *skb,
                              const struct nlattr * const nla[])
 {
        const struct nfgenmsg *nfmsg = nlmsg_data(nlh);
-       const struct nft_af_info *afi;
+       struct nft_af_info *afi;
        struct nft_table *table;
        struct nft_chain *chain;
        struct net *net = sock_net(skb->sk);
        int family = nfmsg->nfgen_family;
+       struct nft_ctx ctx;
+       int err;
 
        afi = nf_tables_afinfo_lookup(net, family, false);
        if (IS_ERR(afi))
@@ -1037,48 +1161,26 @@ static int nf_tables_delchain(struct sock *nlsk, struct sk_buff *skb,
        table = nf_tables_table_lookup(afi, nla[NFTA_CHAIN_TABLE]);
        if (IS_ERR(table))
                return PTR_ERR(table);
+       if (table->flags & NFT_TABLE_INACTIVE)
+               return -ENOENT;
 
        chain = nf_tables_chain_lookup(table, nla[NFTA_CHAIN_NAME]);
        if (IS_ERR(chain))
                return PTR_ERR(chain);
-
+       if (chain->flags & NFT_CHAIN_INACTIVE)
+               return -ENOENT;
        if (!list_empty(&chain->rules) || chain->use > 0)
                return -EBUSY;
 
-       list_del(&chain->list);
-       table->use--;
-
-       if (!(table->flags & NFT_TABLE_F_DORMANT) &&
-           chain->flags & NFT_BASE_CHAIN)
-               nf_unregister_hooks(nft_base_chain(chain)->ops, afi->nops);
-
-       nf_tables_chain_notify(skb, nlh, table, chain, NFT_MSG_DELCHAIN,
-                              family);
-
-       /* Make sure all rule references are gone before this is released */
-       synchronize_rcu();
+       nft_ctx_init(&ctx, skb, nlh, afi, table, chain, nla);
+       err = nft_trans_chain_add(&ctx, NFT_MSG_DELCHAIN);
+       if (err < 0)
+               return err;
 
-       nf_tables_chain_destroy(chain);
+       list_del(&chain->list);
        return 0;
 }
 
-static void nft_ctx_init(struct nft_ctx *ctx,
-                        const struct sk_buff *skb,
-                        const struct nlmsghdr *nlh,
-                        const struct nft_af_info *afi,
-                        const struct nft_table *table,
-                        const struct nft_chain *chain,
-                        const struct nlattr * const *nla)
-{
-       ctx->net   = sock_net(skb->sk);
-       ctx->skb   = skb;
-       ctx->nlh   = nlh;
-       ctx->afi   = afi;
-       ctx->table = table;
-       ctx->chain = chain;
-       ctx->nla   = nla;
-}
-
 /*
  * Expressions
  */
@@ -1093,7 +1195,10 @@ static void nft_ctx_init(struct nft_ctx *ctx,
 int nft_register_expr(struct nft_expr_type *type)
 {
        nfnl_lock(NFNL_SUBSYS_NFTABLES);
-       list_add_tail(&type->list, &nf_tables_expressions);
+       if (type->family == NFPROTO_UNSPEC)
+               list_add_tail(&type->list, &nf_tables_expressions);
+       else
+               list_add(&type->list, &nf_tables_expressions);
        nfnl_unlock(NFNL_SUBSYS_NFTABLES);
        return 0;
 }
@@ -1361,22 +1466,15 @@ nla_put_failure:
        return -1;
 }
 
-static int nf_tables_rule_notify(const struct sk_buff *oskb,
-                                const struct nlmsghdr *nlh,
-                                const struct nft_table *table,
-                                const struct nft_chain *chain,
+static int nf_tables_rule_notify(const struct nft_ctx *ctx,
                                 const struct nft_rule *rule,
-                                int event, u32 flags, int family)
+                                int event)
 {
        struct sk_buff *skb;
-       u32 portid = NETLINK_CB(oskb).portid;
-       struct net *net = oskb ? sock_net(oskb->sk) : &init_net;
-       u32 seq = nlh->nlmsg_seq;
-       bool report;
        int err;
 
-       report = nlmsg_report(nlh);
-       if (!report && !nfnetlink_has_listeners(net, NFNLGRP_NFTABLES))
+       if (!ctx->report &&
+           !nfnetlink_has_listeners(ctx->net, NFNLGRP_NFTABLES))
                return 0;
 
        err = -ENOBUFS;
@@ -1384,18 +1482,21 @@ static int nf_tables_rule_notify(const struct sk_buff *oskb,
        if (skb == NULL)
                goto err;
 
-       err = nf_tables_fill_rule_info(skb, portid, seq, event, flags,
-                                      family, table, chain, rule);
+       err = nf_tables_fill_rule_info(skb, ctx->portid, ctx->seq, event, 0,
+                                      ctx->afi->family, ctx->table,
+                                      ctx->chain, rule);
        if (err < 0) {
                kfree_skb(skb);
                goto err;
        }
 
-       err = nfnetlink_send(skb, net, portid, NFNLGRP_NFTABLES, report,
-                            GFP_KERNEL);
+       err = nfnetlink_send(skb, ctx->net, ctx->portid, NFNLGRP_NFTABLES,
+                            ctx->report, GFP_KERNEL);
 err:
-       if (err < 0)
-               nfnetlink_set_err(net, portid, NFNLGRP_NFTABLES, err);
+       if (err < 0) {
+               nfnetlink_set_err(ctx->net, ctx->portid, NFNLGRP_NFTABLES,
+                                 err);
+       }
        return err;
 }
 
@@ -1511,10 +1612,14 @@ static int nf_tables_getrule(struct sock *nlsk, struct sk_buff *skb,
        table = nf_tables_table_lookup(afi, nla[NFTA_RULE_TABLE]);
        if (IS_ERR(table))
                return PTR_ERR(table);
+       if (table->flags & NFT_TABLE_INACTIVE)
+               return -ENOENT;
 
        chain = nf_tables_chain_lookup(table, nla[NFTA_RULE_CHAIN]);
        if (IS_ERR(chain))
                return PTR_ERR(chain);
+       if (chain->flags & NFT_CHAIN_INACTIVE)
+               return -ENOENT;
 
        rule = nf_tables_rule_lookup(chain, nla[NFTA_RULE_HANDLE]);
        if (IS_ERR(rule))
@@ -1554,37 +1659,36 @@ static void nf_tables_rule_destroy(const struct nft_ctx *ctx,
        kfree(rule);
 }
 
-#define NFT_RULE_MAXEXPRS      128
-
-static struct nft_expr_info *info;
-
-static struct nft_rule_trans *
-nf_tables_trans_add(struct nft_ctx *ctx, struct nft_rule *rule)
+static struct nft_trans *nft_trans_rule_add(struct nft_ctx *ctx, int msg_type,
+                                           struct nft_rule *rule)
 {
-       struct nft_rule_trans *rupd;
+       struct nft_trans *trans;
 
-       rupd = kmalloc(sizeof(struct nft_rule_trans), GFP_KERNEL);
-       if (rupd == NULL)
-              return NULL;
+       trans = nft_trans_alloc(ctx, msg_type, sizeof(struct nft_trans_rule));
+       if (trans == NULL)
+               return NULL;
 
-       rupd->ctx = *ctx;
-       rupd->rule = rule;
-       list_add_tail(&rupd->list, &ctx->net->nft.commit_list);
+       nft_trans_rule(trans) = rule;
+       list_add_tail(&trans->list, &ctx->net->nft.commit_list);
 
-       return rupd;
+       return trans;
 }
 
+#define NFT_RULE_MAXEXPRS      128
+
+static struct nft_expr_info *info;
+
 static int nf_tables_newrule(struct sock *nlsk, struct sk_buff *skb,
                             const struct nlmsghdr *nlh,
                             const struct nlattr * const nla[])
 {
        const struct nfgenmsg *nfmsg = nlmsg_data(nlh);
-       const struct nft_af_info *afi;
+       struct nft_af_info *afi;
        struct net *net = sock_net(skb->sk);
        struct nft_table *table;
        struct nft_chain *chain;
        struct nft_rule *rule, *old_rule = NULL;
-       struct nft_rule_trans *repl = NULL;
+       struct nft_trans *trans = NULL;
        struct nft_expr *expr;
        struct nft_ctx ctx;
        struct nlattr *tmp;
@@ -1682,8 +1786,9 @@ static int nf_tables_newrule(struct sock *nlsk, struct sk_buff *skb,
 
        if (nlh->nlmsg_flags & NLM_F_REPLACE) {
                if (nft_rule_is_active_next(net, old_rule)) {
-                       repl = nf_tables_trans_add(&ctx, old_rule);
-                       if (repl == NULL) {
+                       trans = nft_trans_rule_add(&ctx, NFT_MSG_NEWRULE,
+                                                  old_rule);
+                       if (trans == NULL) {
                                err = -ENOMEM;
                                goto err2;
                        }
@@ -1705,7 +1810,7 @@ static int nf_tables_newrule(struct sock *nlsk, struct sk_buff *skb,
                        list_add_rcu(&rule->list, &chain->rules);
        }
 
-       if (nf_tables_trans_add(&ctx, rule) == NULL) {
+       if (nft_trans_rule_add(&ctx, NFT_MSG_NEWRULE, rule) == NULL) {
                err = -ENOMEM;
                goto err3;
        }
@@ -1713,11 +1818,10 @@ static int nf_tables_newrule(struct sock *nlsk, struct sk_buff *skb,
 
 err3:
        list_del_rcu(&rule->list);
-       if (repl) {
-               list_del_rcu(&repl->rule->list);
-               list_del(&repl->list);
-               nft_rule_clear(net, repl->rule);
-               kfree(repl);
+       if (trans) {
+               list_del_rcu(&nft_trans_rule(trans)->list);
+               nft_rule_clear(net, nft_trans_rule(trans));
+               nft_trans_destroy(trans);
        }
 err2:
        nf_tables_rule_destroy(&ctx, rule);
@@ -1734,7 +1838,7 @@ nf_tables_delrule_one(struct nft_ctx *ctx, struct nft_rule *rule)
 {
        /* You cannot delete the same rule twice */
        if (nft_rule_is_active_next(ctx->net, rule)) {
-               if (nf_tables_trans_add(ctx, rule) == NULL)
+               if (nft_trans_rule_add(ctx, NFT_MSG_DELRULE, rule) == NULL)
                        return -ENOMEM;
                nft_rule_disactivate_next(ctx->net, rule);
                return 0;
@@ -1760,9 +1864,9 @@ static int nf_tables_delrule(struct sock *nlsk, struct sk_buff *skb,
                             const struct nlattr * const nla[])
 {
        const struct nfgenmsg *nfmsg = nlmsg_data(nlh);
-       const struct nft_af_info *afi;
+       struct nft_af_info *afi;
        struct net *net = sock_net(skb->sk);
-       const struct nft_table *table;
+       struct nft_table *table;
        struct nft_chain *chain = NULL;
        struct nft_rule *rule;
        int family = nfmsg->nfgen_family, err = 0;
@@ -1775,6 +1879,8 @@ static int nf_tables_delrule(struct sock *nlsk, struct sk_buff *skb,
        table = nf_tables_table_lookup(afi, nla[NFTA_RULE_TABLE]);
        if (IS_ERR(table))
                return PTR_ERR(table);
+       if (table->flags & NFT_TABLE_INACTIVE)
+               return -ENOENT;
 
        if (nla[NFTA_RULE_CHAIN]) {
                chain = nf_tables_chain_lookup(table, nla[NFTA_RULE_CHAIN]);
@@ -1807,88 +1913,6 @@ static int nf_tables_delrule(struct sock *nlsk, struct sk_buff *skb,
        return err;
 }
 
-static int nf_tables_commit(struct sk_buff *skb)
-{
-       struct net *net = sock_net(skb->sk);
-       struct nft_rule_trans *rupd, *tmp;
-
-       /* Bump generation counter, invalidate any dump in progress */
-       net->nft.genctr++;
-
-       /* A new generation has just started */
-       net->nft.gencursor = gencursor_next(net);
-
-       /* Make sure all packets have left the previous generation before
-        * purging old rules.
-        */
-       synchronize_rcu();
-
-       list_for_each_entry_safe(rupd, tmp, &net->nft.commit_list, list) {
-               /* This rule was inactive in the past and just became active.
-                * Clear the next bit of the genmask since its meaning has
-                * changed, now it is the future.
-                */
-               if (nft_rule_is_active(net, rupd->rule)) {
-                       nft_rule_clear(net, rupd->rule);
-                       nf_tables_rule_notify(skb, rupd->ctx.nlh,
-                                             rupd->ctx.table, rupd->ctx.chain,
-                                             rupd->rule, NFT_MSG_NEWRULE, 0,
-                                             rupd->ctx.afi->family);
-                       list_del(&rupd->list);
-                       kfree(rupd);
-                       continue;
-               }
-
-               /* This rule is in the past, get rid of it */
-               list_del_rcu(&rupd->rule->list);
-               nf_tables_rule_notify(skb, rupd->ctx.nlh,
-                                     rupd->ctx.table, rupd->ctx.chain,
-                                     rupd->rule, NFT_MSG_DELRULE, 0,
-                                     rupd->ctx.afi->family);
-       }
-
-       /* Make sure we don't see any packet traversing old rules */
-       synchronize_rcu();
-
-       /* Now we can safely release unused old rules */
-       list_for_each_entry_safe(rupd, tmp, &net->nft.commit_list, list) {
-               nf_tables_rule_destroy(&rupd->ctx, rupd->rule);
-               list_del(&rupd->list);
-               kfree(rupd);
-       }
-
-       return 0;
-}
-
-static int nf_tables_abort(struct sk_buff *skb)
-{
-       struct net *net = sock_net(skb->sk);
-       struct nft_rule_trans *rupd, *tmp;
-
-       list_for_each_entry_safe(rupd, tmp, &net->nft.commit_list, list) {
-               if (!nft_rule_is_active_next(net, rupd->rule)) {
-                       nft_rule_clear(net, rupd->rule);
-                       list_del(&rupd->list);
-                       kfree(rupd);
-                       continue;
-               }
-
-               /* This rule is inactive, get rid of it */
-               list_del_rcu(&rupd->rule->list);
-       }
-
-       /* Make sure we don't see any packet accessing aborted rules */
-       synchronize_rcu();
-
-       list_for_each_entry_safe(rupd, tmp, &net->nft.commit_list, list) {
-               nf_tables_rule_destroy(&rupd->ctx, rupd->rule);
-               list_del(&rupd->list);
-               kfree(rupd);
-       }
-
-       return 0;
-}
-
 /*
  * Sets
  */
@@ -1912,9 +1936,18 @@ void nft_unregister_set(struct nft_set_ops *ops)
 }
 EXPORT_SYMBOL_GPL(nft_unregister_set);
 
-static const struct nft_set_ops *nft_select_set_ops(const struct nlattr * const nla[])
+/*
+ * Select a set implementation based on the data characteristics and the
+ * given policy. The total memory use might not be known if no size is
+ * given, in that case the amount of memory per element is used.
+ */
+static const struct nft_set_ops *
+nft_select_set_ops(const struct nlattr * const nla[],
+                  const struct nft_set_desc *desc,
+                  enum nft_set_policies policy)
 {
-       const struct nft_set_ops *ops;
+       const struct nft_set_ops *ops, *bops;
+       struct nft_set_estimate est, best;
        u32 features;
 
 #ifdef CONFIG_MODULES
@@ -1932,15 +1965,45 @@ static const struct nft_set_ops *nft_select_set_ops(const struct nlattr * const
                features &= NFT_SET_INTERVAL | NFT_SET_MAP;
        }
 
-       // FIXME: implement selection properly
+       bops       = NULL;
+       best.size  = ~0;
+       best.class = ~0;
+
        list_for_each_entry(ops, &nf_tables_set_ops, list) {
                if ((ops->features & features) != features)
                        continue;
+               if (!ops->estimate(desc, features, &est))
+                       continue;
+
+               switch (policy) {
+               case NFT_SET_POL_PERFORMANCE:
+                       if (est.class < best.class)
+                               break;
+                       if (est.class == best.class && est.size < best.size)
+                               break;
+                       continue;
+               case NFT_SET_POL_MEMORY:
+                       if (est.size < best.size)
+                               break;
+                       if (est.size == best.size && est.class < best.class)
+                               break;
+                       continue;
+               default:
+                       break;
+               }
+
                if (!try_module_get(ops->owner))
                        continue;
-               return ops;
+               if (bops != NULL)
+                       module_put(bops->owner);
+
+               bops = ops;
+               best = est;
        }
 
+       if (bops != NULL)
+               return bops;
+
        return ERR_PTR(-EOPNOTSUPP);
 }
 
@@ -1953,6 +2016,13 @@ static const struct nla_policy nft_set_policy[NFTA_SET_MAX + 1] = {
        [NFTA_SET_KEY_LEN]              = { .type = NLA_U32 },
        [NFTA_SET_DATA_TYPE]            = { .type = NLA_U32 },
        [NFTA_SET_DATA_LEN]             = { .type = NLA_U32 },
+       [NFTA_SET_POLICY]               = { .type = NLA_U32 },
+       [NFTA_SET_DESC]                 = { .type = NLA_NESTED },
+       [NFTA_SET_ID]                   = { .type = NLA_U32 },
+};
+
+static const struct nla_policy nft_set_desc_policy[NFTA_SET_DESC_MAX + 1] = {
+       [NFTA_SET_DESC_SIZE]            = { .type = NLA_U32 },
 };
 
 static int nft_ctx_init_from_setattr(struct nft_ctx *ctx,
@@ -1962,8 +2032,8 @@ static int nft_ctx_init_from_setattr(struct nft_ctx *ctx,
 {
        struct net *net = sock_net(skb->sk);
        const struct nfgenmsg *nfmsg = nlmsg_data(nlh);
-       const struct nft_af_info *afi = NULL;
-       const struct nft_table *table = NULL;
+       struct nft_af_info *afi = NULL;
+       struct nft_table *table = NULL;
 
        if (nfmsg->nfgen_family != NFPROTO_UNSPEC) {
                afi = nf_tables_afinfo_lookup(net, nfmsg->nfgen_family, false);
@@ -1978,6 +2048,8 @@ static int nft_ctx_init_from_setattr(struct nft_ctx *ctx,
                table = nf_tables_table_lookup(afi, nla[NFTA_SET_TABLE]);
                if (IS_ERR(table))
                        return PTR_ERR(table);
+               if (table->flags & NFT_TABLE_INACTIVE)
+                       return -ENOENT;
        }
 
        nft_ctx_init(ctx, skb, nlh, afi, table, NULL, nla);
@@ -1999,13 +2071,27 @@ struct nft_set *nf_tables_set_lookup(const struct nft_table *table,
        return ERR_PTR(-ENOENT);
 }
 
+struct nft_set *nf_tables_set_lookup_byid(const struct net *net,
+                                         const struct nlattr *nla)
+{
+       struct nft_trans *trans;
+       u32 id = ntohl(nla_get_be32(nla));
+
+       list_for_each_entry(trans, &net->nft.commit_list, list) {
+               if (trans->msg_type == NFT_MSG_NEWSET &&
+                   id == nft_trans_set_id(trans))
+                       return nft_trans_set(trans);
+       }
+       return ERR_PTR(-ENOENT);
+}
+
 static int nf_tables_set_alloc_name(struct nft_ctx *ctx, struct nft_set *set,
                                    const char *name)
 {
        const struct nft_set *i;
        const char *p;
        unsigned long *inuse;
-       unsigned int n = 0;
+       unsigned int n = 0, min = 0;
 
        p = strnchr(name, IFNAMSIZ, '%');
        if (p != NULL) {
@@ -2015,23 +2101,28 @@ static int nf_tables_set_alloc_name(struct nft_ctx *ctx, struct nft_set *set,
                inuse = (unsigned long *)get_zeroed_page(GFP_KERNEL);
                if (inuse == NULL)
                        return -ENOMEM;
-
+cont:
                list_for_each_entry(i, &ctx->table->sets, list) {
                        int tmp;
 
                        if (!sscanf(i->name, name, &tmp))
                                continue;
-                       if (tmp < 0 || tmp >= BITS_PER_BYTE * PAGE_SIZE)
+                       if (tmp < min || tmp >= min + BITS_PER_BYTE * PAGE_SIZE)
                                continue;
 
-                       set_bit(tmp, inuse);
+                       set_bit(tmp - min, inuse);
                }
 
                n = find_first_zero_bit(inuse, BITS_PER_BYTE * PAGE_SIZE);
+               if (n >= BITS_PER_BYTE * PAGE_SIZE) {
+                       min += BITS_PER_BYTE * PAGE_SIZE;
+                       memset(inuse, 0, PAGE_SIZE);
+                       goto cont;
+               }
                free_page((unsigned long)inuse);
        }
 
-       snprintf(set->name, sizeof(set->name), name, n);
+       snprintf(set->name, sizeof(set->name), name, min + n);
        list_for_each_entry(i, &ctx->table->sets, list) {
                if (!strcmp(set->name, i->name))
                        return -ENFILE;
@@ -2044,8 +2135,9 @@ static int nf_tables_fill_set(struct sk_buff *skb, const struct nft_ctx *ctx,
 {
        struct nfgenmsg *nfmsg;
        struct nlmsghdr *nlh;
-       u32 portid = NETLINK_CB(ctx->skb).portid;
-       u32 seq = ctx->nlh->nlmsg_seq;
+       struct nlattr *desc;
+       u32 portid = ctx->portid;
+       u32 seq = ctx->seq;
 
        event |= NFNL_SUBSYS_NFTABLES << 8;
        nlh = nlmsg_put(skb, portid, seq, event, sizeof(struct nfgenmsg),
@@ -2077,6 +2169,14 @@ static int nf_tables_fill_set(struct sk_buff *skb, const struct nft_ctx *ctx,
                        goto nla_put_failure;
        }
 
+       desc = nla_nest_start(skb, NFTA_SET_DESC);
+       if (desc == NULL)
+               goto nla_put_failure;
+       if (set->size &&
+           nla_put_be32(skb, NFTA_SET_DESC_SIZE, htonl(set->size)))
+               goto nla_put_failure;
+       nla_nest_end(skb, desc);
+
        return nlmsg_end(skb, nlh);
 
 nla_put_failure:
@@ -2089,12 +2189,11 @@ static int nf_tables_set_notify(const struct nft_ctx *ctx,
                                int event)
 {
        struct sk_buff *skb;
-       u32 portid = NETLINK_CB(ctx->skb).portid;
-       bool report;
+       u32 portid = ctx->portid;
        int err;
 
-       report = nlmsg_report(ctx->nlh);
-       if (!report && !nfnetlink_has_listeners(ctx->net, NFNLGRP_NFTABLES))
+       if (!ctx->report &&
+           !nfnetlink_has_listeners(ctx->net, NFNLGRP_NFTABLES))
                return 0;
 
        err = -ENOBUFS;
@@ -2108,8 +2207,8 @@ static int nf_tables_set_notify(const struct nft_ctx *ctx,
                goto err;
        }
 
-       err = nfnetlink_send(skb, ctx->net, portid, NFNLGRP_NFTABLES, report,
-                            GFP_KERNEL);
+       err = nfnetlink_send(skb, ctx->net, portid, NFNLGRP_NFTABLES,
+                            ctx->report, GFP_KERNEL);
 err:
        if (err < 0)
                nfnetlink_set_err(ctx->net, portid, NFNLGRP_NFTABLES, err);
@@ -2183,7 +2282,7 @@ static int nf_tables_dump_sets_all(struct nft_ctx *ctx, struct sk_buff *skb,
 {
        const struct nft_set *set;
        unsigned int idx, s_idx = cb->args[0];
-       const struct nft_af_info *afi;
+       struct nft_af_info *afi;
        struct nft_table *table, *cur_table = (struct nft_table *)cb->args[2];
        struct net *net = sock_net(skb->sk);
        int cur_family = cb->args[3];
@@ -2260,6 +2359,8 @@ static int nf_tables_dump_sets(struct sk_buff *skb, struct netlink_callback *cb)
        return ret;
 }
 
+#define NFT_SET_INACTIVE       (1 << 15)       /* Internal set flag */
+
 static int nf_tables_getset(struct sock *nlsk, struct sk_buff *skb,
                            const struct nlmsghdr *nlh,
                            const struct nlattr * const nla[])
@@ -2289,6 +2390,8 @@ static int nf_tables_getset(struct sock *nlsk, struct sk_buff *skb,
        set = nf_tables_set_lookup(ctx.table, nla[NFTA_SET_NAME]);
        if (IS_ERR(set))
                return PTR_ERR(set);
+       if (set->flags & NFT_SET_INACTIVE)
+               return -ENOENT;
 
        skb2 = alloc_skb(NLMSG_GOODSIZE, GFP_KERNEL);
        if (skb2 == NULL)
@@ -2305,13 +2408,50 @@ err:
        return err;
 }
 
+static int nf_tables_set_desc_parse(const struct nft_ctx *ctx,
+                                   struct nft_set_desc *desc,
+                                   const struct nlattr *nla)
+{
+       struct nlattr *da[NFTA_SET_DESC_MAX + 1];
+       int err;
+
+       err = nla_parse_nested(da, NFTA_SET_DESC_MAX, nla, nft_set_desc_policy);
+       if (err < 0)
+               return err;
+
+       if (da[NFTA_SET_DESC_SIZE] != NULL)
+               desc->size = ntohl(nla_get_be32(da[NFTA_SET_DESC_SIZE]));
+
+       return 0;
+}
+
+static int nft_trans_set_add(struct nft_ctx *ctx, int msg_type,
+                            struct nft_set *set)
+{
+       struct nft_trans *trans;
+
+       trans = nft_trans_alloc(ctx, msg_type, sizeof(struct nft_trans_set));
+       if (trans == NULL)
+               return -ENOMEM;
+
+       if (msg_type == NFT_MSG_NEWSET && ctx->nla[NFTA_SET_ID] != NULL) {
+               nft_trans_set_id(trans) =
+                       ntohl(nla_get_be32(ctx->nla[NFTA_SET_ID]));
+               set->flags |= NFT_SET_INACTIVE;
+       }
+       nft_trans_set(trans) = set;
+       list_add_tail(&trans->list, &ctx->net->nft.commit_list);
+
+       return 0;
+}
+
 static int nf_tables_newset(struct sock *nlsk, struct sk_buff *skb,
                            const struct nlmsghdr *nlh,
                            const struct nlattr * const nla[])
 {
        const struct nfgenmsg *nfmsg = nlmsg_data(nlh);
        const struct nft_set_ops *ops;
-       const struct nft_af_info *afi;
+       struct nft_af_info *afi;
        struct net *net = sock_net(skb->sk);
        struct nft_table *table;
        struct nft_set *set;
@@ -2319,14 +2459,18 @@ static int nf_tables_newset(struct sock *nlsk, struct sk_buff *skb,
        char name[IFNAMSIZ];
        unsigned int size;
        bool create;
-       u32 ktype, klen, dlen, dtype, flags;
+       u32 ktype, dtype, flags, policy;
+       struct nft_set_desc desc;
        int err;
 
        if (nla[NFTA_SET_TABLE] == NULL ||
            nla[NFTA_SET_NAME] == NULL ||
-           nla[NFTA_SET_KEY_LEN] == NULL)
+           nla[NFTA_SET_KEY_LEN] == NULL ||
+           nla[NFTA_SET_ID] == NULL)
                return -EINVAL;
 
+       memset(&desc, 0, sizeof(desc));
+
        ktype = NFT_DATA_VALUE;
        if (nla[NFTA_SET_KEY_TYPE] != NULL) {
                ktype = ntohl(nla_get_be32(nla[NFTA_SET_KEY_TYPE]));
@@ -2334,8 +2478,8 @@ static int nf_tables_newset(struct sock *nlsk, struct sk_buff *skb,
                        return -EINVAL;
        }
 
-       klen = ntohl(nla_get_be32(nla[NFTA_SET_KEY_LEN]));
-       if (klen == 0 || klen > FIELD_SIZEOF(struct nft_data, data))
+       desc.klen = ntohl(nla_get_be32(nla[NFTA_SET_KEY_LEN]));
+       if (desc.klen == 0 || desc.klen > FIELD_SIZEOF(struct nft_data, data))
                return -EINVAL;
 
        flags = 0;
@@ -2347,7 +2491,6 @@ static int nf_tables_newset(struct sock *nlsk, struct sk_buff *skb,
        }
 
        dtype = 0;
-       dlen  = 0;
        if (nla[NFTA_SET_DATA_TYPE] != NULL) {
                if (!(flags & NFT_SET_MAP))
                        return -EINVAL;
@@ -2360,15 +2503,25 @@ static int nf_tables_newset(struct sock *nlsk, struct sk_buff *skb,
                if (dtype != NFT_DATA_VERDICT) {
                        if (nla[NFTA_SET_DATA_LEN] == NULL)
                                return -EINVAL;
-                       dlen = ntohl(nla_get_be32(nla[NFTA_SET_DATA_LEN]));
-                       if (dlen == 0 ||
-                           dlen > FIELD_SIZEOF(struct nft_data, data))
+                       desc.dlen = ntohl(nla_get_be32(nla[NFTA_SET_DATA_LEN]));
+                       if (desc.dlen == 0 ||
+                           desc.dlen > FIELD_SIZEOF(struct nft_data, data))
                                return -EINVAL;
                } else
-                       dlen = sizeof(struct nft_data);
+                       desc.dlen = sizeof(struct nft_data);
        } else if (flags & NFT_SET_MAP)
                return -EINVAL;
 
+       policy = NFT_SET_POL_PERFORMANCE;
+       if (nla[NFTA_SET_POLICY] != NULL)
+               policy = ntohl(nla_get_be32(nla[NFTA_SET_POLICY]));
+
+       if (nla[NFTA_SET_DESC] != NULL) {
+               err = nf_tables_set_desc_parse(&ctx, &desc, nla[NFTA_SET_DESC]);
+               if (err < 0)
+                       return err;
+       }
+
        create = nlh->nlmsg_flags & NLM_F_CREATE ? true : false;
 
        afi = nf_tables_afinfo_lookup(net, nfmsg->nfgen_family, create);
@@ -2399,7 +2552,7 @@ static int nf_tables_newset(struct sock *nlsk, struct sk_buff *skb,
        if (!(nlh->nlmsg_flags & NLM_F_CREATE))
                return -ENOENT;
 
-       ops = nft_select_set_ops(nla);
+       ops = nft_select_set_ops(nla, &desc, policy);
        if (IS_ERR(ops))
                return PTR_ERR(ops);
 
@@ -2420,17 +2573,21 @@ static int nf_tables_newset(struct sock *nlsk, struct sk_buff *skb,
        INIT_LIST_HEAD(&set->bindings);
        set->ops   = ops;
        set->ktype = ktype;
-       set->klen  = klen;
+       set->klen  = desc.klen;
        set->dtype = dtype;
-       set->dlen  = dlen;
+       set->dlen  = desc.dlen;
        set->flags = flags;
+       set->size  = desc.size;
 
-       err = ops->init(set, nla);
+       err = ops->init(set, &desc, nla);
+       if (err < 0)
+               goto err2;
+
+       err = nft_trans_set_add(&ctx, NFT_MSG_NEWSET, set);
        if (err < 0)
                goto err2;
 
        list_add_tail(&set->list, &table->sets);
-       nf_tables_set_notify(&ctx, set, NFT_MSG_NEWSET);
        return 0;
 
 err2:
@@ -2440,16 +2597,20 @@ err1:
        return err;
 }
 
-static void nf_tables_set_destroy(const struct nft_ctx *ctx, struct nft_set *set)
+static void nft_set_destroy(struct nft_set *set)
 {
-       list_del(&set->list);
-       nf_tables_set_notify(ctx, set, NFT_MSG_DELSET);
-
        set->ops->destroy(set);
        module_put(set->ops->owner);
        kfree(set);
 }
 
+static void nf_tables_set_destroy(const struct nft_ctx *ctx, struct nft_set *set)
+{
+       list_del(&set->list);
+       nf_tables_set_notify(ctx, set, NFT_MSG_DELSET);
+       nft_set_destroy(set);
+}
+
 static int nf_tables_delset(struct sock *nlsk, struct sk_buff *skb,
                            const struct nlmsghdr *nlh,
                            const struct nlattr * const nla[])
@@ -2471,10 +2632,16 @@ static int nf_tables_delset(struct sock *nlsk, struct sk_buff *skb,
        set = nf_tables_set_lookup(ctx.table, nla[NFTA_SET_NAME]);
        if (IS_ERR(set))
                return PTR_ERR(set);
+       if (set->flags & NFT_SET_INACTIVE)
+               return -ENOENT;
        if (!list_empty(&set->bindings))
                return -EBUSY;
 
-       nf_tables_set_destroy(&ctx, set);
+       err = nft_trans_set_add(&ctx, NFT_MSG_DELSET, set);
+       if (err < 0)
+               return err;
+
+       list_del(&set->list);
        return 0;
 }
 
@@ -2534,7 +2701,8 @@ void nf_tables_unbind_set(const struct nft_ctx *ctx, struct nft_set *set,
 {
        list_del(&binding->list);
 
-       if (list_empty(&set->bindings) && set->flags & NFT_SET_ANONYMOUS)
+       if (list_empty(&set->bindings) && set->flags & NFT_SET_ANONYMOUS &&
+           !(set->flags & NFT_SET_INACTIVE))
                nf_tables_set_destroy(ctx, set);
 }
 
@@ -2552,16 +2720,18 @@ static const struct nla_policy nft_set_elem_list_policy[NFTA_SET_ELEM_LIST_MAX +
        [NFTA_SET_ELEM_LIST_TABLE]      = { .type = NLA_STRING },
        [NFTA_SET_ELEM_LIST_SET]        = { .type = NLA_STRING },
        [NFTA_SET_ELEM_LIST_ELEMENTS]   = { .type = NLA_NESTED },
+       [NFTA_SET_ELEM_LIST_SET_ID]     = { .type = NLA_U32 },
 };
 
 static int nft_ctx_init_from_elemattr(struct nft_ctx *ctx,
                                      const struct sk_buff *skb,
                                      const struct nlmsghdr *nlh,
-                                     const struct nlattr * const nla[])
+                                     const struct nlattr * const nla[],
+                                     bool trans)
 {
        const struct nfgenmsg *nfmsg = nlmsg_data(nlh);
-       const struct nft_af_info *afi;
-       const struct nft_table *table;
+       struct nft_af_info *afi;
+       struct nft_table *table;
        struct net *net = sock_net(skb->sk);
 
        afi = nf_tables_afinfo_lookup(net, nfmsg->nfgen_family, false);
@@ -2571,6 +2741,8 @@ static int nft_ctx_init_from_elemattr(struct nft_ctx *ctx,
        table = nf_tables_table_lookup(afi, nla[NFTA_SET_ELEM_LIST_TABLE]);
        if (IS_ERR(table))
                return PTR_ERR(table);
+       if (!trans && (table->flags & NFT_TABLE_INACTIVE))
+               return -ENOENT;
 
        nft_ctx_init(ctx, skb, nlh, afi, table, NULL, nla);
        return 0;
@@ -2644,13 +2816,16 @@ static int nf_tables_dump_set(struct sk_buff *skb, struct netlink_callback *cb)
        if (err < 0)
                return err;
 
-       err = nft_ctx_init_from_elemattr(&ctx, cb->skb, cb->nlh, (void *)nla);
+       err = nft_ctx_init_from_elemattr(&ctx, cb->skb, cb->nlh, (void *)nla,
+                                        false);
        if (err < 0)
                return err;
 
        set = nf_tables_set_lookup(ctx.table, nla[NFTA_SET_ELEM_LIST_SET]);
        if (IS_ERR(set))
                return PTR_ERR(set);
+       if (set->flags & NFT_SET_INACTIVE)
+               return -ENOENT;
 
        event  = NFT_MSG_NEWSETELEM;
        event |= NFNL_SUBSYS_NFTABLES << 8;
@@ -2707,13 +2882,15 @@ static int nf_tables_getsetelem(struct sock *nlsk, struct sk_buff *skb,
        struct nft_ctx ctx;
        int err;
 
-       err = nft_ctx_init_from_elemattr(&ctx, skb, nlh, nla);
+       err = nft_ctx_init_from_elemattr(&ctx, skb, nlh, nla, false);
        if (err < 0)
                return err;
 
        set = nf_tables_set_lookup(ctx.table, nla[NFTA_SET_ELEM_LIST_SET]);
        if (IS_ERR(set))
                return PTR_ERR(set);
+       if (set->flags & NFT_SET_INACTIVE)
+               return -ENOENT;
 
        if (nlh->nlmsg_flags & NLM_F_DUMP) {
                struct netlink_dump_control c = {
@@ -2724,7 +2901,98 @@ static int nf_tables_getsetelem(struct sock *nlsk, struct sk_buff *skb,
        return -EOPNOTSUPP;
 }
 
-static int nft_add_set_elem(const struct nft_ctx *ctx, struct nft_set *set,
+static int nf_tables_fill_setelem_info(struct sk_buff *skb,
+                                      const struct nft_ctx *ctx, u32 seq,
+                                      u32 portid, int event, u16 flags,
+                                      const struct nft_set *set,
+                                      const struct nft_set_elem *elem)
+{
+       struct nfgenmsg *nfmsg;
+       struct nlmsghdr *nlh;
+       struct nlattr *nest;
+       int err;
+
+       event |= NFNL_SUBSYS_NFTABLES << 8;
+       nlh = nlmsg_put(skb, portid, seq, event, sizeof(struct nfgenmsg),
+                       flags);
+       if (nlh == NULL)
+               goto nla_put_failure;
+
+       nfmsg = nlmsg_data(nlh);
+       nfmsg->nfgen_family     = ctx->afi->family;
+       nfmsg->version          = NFNETLINK_V0;
+       nfmsg->res_id           = 0;
+
+       if (nla_put_string(skb, NFTA_SET_TABLE, ctx->table->name))
+               goto nla_put_failure;
+       if (nla_put_string(skb, NFTA_SET_NAME, set->name))
+               goto nla_put_failure;
+
+       nest = nla_nest_start(skb, NFTA_SET_ELEM_LIST_ELEMENTS);
+       if (nest == NULL)
+               goto nla_put_failure;
+
+       err = nf_tables_fill_setelem(skb, set, elem);
+       if (err < 0)
+               goto nla_put_failure;
+
+       nla_nest_end(skb, nest);
+
+       return nlmsg_end(skb, nlh);
+
+nla_put_failure:
+       nlmsg_trim(skb, nlh);
+       return -1;
+}
+
+static int nf_tables_setelem_notify(const struct nft_ctx *ctx,
+                                   const struct nft_set *set,
+                                   const struct nft_set_elem *elem,
+                                   int event, u16 flags)
+{
+       struct net *net = ctx->net;
+       u32 portid = ctx->portid;
+       struct sk_buff *skb;
+       int err;
+
+       if (!ctx->report && !nfnetlink_has_listeners(net, NFNLGRP_NFTABLES))
+               return 0;
+
+       err = -ENOBUFS;
+       skb = nlmsg_new(NLMSG_GOODSIZE, GFP_KERNEL);
+       if (skb == NULL)
+               goto err;
+
+       err = nf_tables_fill_setelem_info(skb, ctx, 0, portid, event, flags,
+                                         set, elem);
+       if (err < 0) {
+               kfree_skb(skb);
+               goto err;
+       }
+
+       err = nfnetlink_send(skb, net, portid, NFNLGRP_NFTABLES, ctx->report,
+                            GFP_KERNEL);
+err:
+       if (err < 0)
+               nfnetlink_set_err(net, portid, NFNLGRP_NFTABLES, err);
+       return err;
+}
+
+static struct nft_trans *nft_trans_elem_alloc(struct nft_ctx *ctx,
+                                             int msg_type,
+                                             struct nft_set *set)
+{
+       struct nft_trans *trans;
+
+       trans = nft_trans_alloc(ctx, msg_type, sizeof(struct nft_trans_elem));
+       if (trans == NULL)
+               return NULL;
+
+       nft_trans_elem_set(trans) = set;
+       return trans;
+}
+
+static int nft_add_set_elem(struct nft_ctx *ctx, struct nft_set *set,
                            const struct nlattr *attr)
 {
        struct nlattr *nla[NFTA_SET_ELEM_MAX + 1];
@@ -2732,8 +3000,12 @@ static int nft_add_set_elem(const struct nft_ctx *ctx, struct nft_set *set,
        struct nft_set_elem elem;
        struct nft_set_binding *binding;
        enum nft_registers dreg;
+       struct nft_trans *trans;
        int err;
 
+       if (set->size && set->nelems == set->size)
+               return -ENFILE;
+
        err = nla_parse_nested(nla, NFTA_SET_ELEM_MAX, attr,
                               nft_set_elem_policy);
        if (err < 0)
@@ -2786,7 +3058,7 @@ static int nft_add_set_elem(const struct nft_ctx *ctx, struct nft_set *set,
                        struct nft_ctx bind_ctx = {
                                .afi    = ctx->afi,
                                .table  = ctx->table,
-                               .chain  = binding->chain,
+                               .chain  = (struct nft_chain *)binding->chain,
                        };
 
                        err = nft_validate_data_load(&bind_ctx, dreg,
@@ -2796,12 +3068,20 @@ static int nft_add_set_elem(const struct nft_ctx *ctx, struct nft_set *set,
                }
        }
 
+       trans = nft_trans_elem_alloc(ctx, NFT_MSG_NEWSETELEM, set);
+       if (trans == NULL)
+               goto err3;
+
        err = set->ops->insert(set, &elem);
        if (err < 0)
-               goto err3;
+               goto err4;
 
+       nft_trans_elem(trans) = elem;
+       list_add(&trans->list, &ctx->net->nft.commit_list);
        return 0;
 
+err4:
+       kfree(trans);
 err3:
        if (nla[NFTA_SET_ELEM_DATA] != NULL)
                nft_data_uninit(&elem.data, d2.type);
@@ -2815,35 +3095,44 @@ static int nf_tables_newsetelem(struct sock *nlsk, struct sk_buff *skb,
                                const struct nlmsghdr *nlh,
                                const struct nlattr * const nla[])
 {
+       struct net *net = sock_net(skb->sk);
        const struct nlattr *attr;
        struct nft_set *set;
        struct nft_ctx ctx;
-       int rem, err;
+       int rem, err = 0;
 
-       err = nft_ctx_init_from_elemattr(&ctx, skb, nlh, nla);
+       err = nft_ctx_init_from_elemattr(&ctx, skb, nlh, nla, true);
        if (err < 0)
                return err;
 
        set = nf_tables_set_lookup(ctx.table, nla[NFTA_SET_ELEM_LIST_SET]);
-       if (IS_ERR(set))
-               return PTR_ERR(set);
+       if (IS_ERR(set)) {
+               if (nla[NFTA_SET_ELEM_LIST_SET_ID]) {
+                       set = nf_tables_set_lookup_byid(net,
+                                       nla[NFTA_SET_ELEM_LIST_SET_ID]);
+               }
+               if (IS_ERR(set))
+                       return PTR_ERR(set);
+       }
+
        if (!list_empty(&set->bindings) && set->flags & NFT_SET_CONSTANT)
                return -EBUSY;
 
        nla_for_each_nested(attr, nla[NFTA_SET_ELEM_LIST_ELEMENTS], rem) {
                err = nft_add_set_elem(&ctx, set, attr);
                if (err < 0)
-                       return err;
+                       break;
        }
-       return 0;
+       return err;
 }
 
-static int nft_del_setelem(const struct nft_ctx *ctx, struct nft_set *set,
+static int nft_del_setelem(struct nft_ctx *ctx, struct nft_set *set,
                           const struct nlattr *attr)
 {
        struct nlattr *nla[NFTA_SET_ELEM_MAX + 1];
        struct nft_data_desc desc;
        struct nft_set_elem elem;
+       struct nft_trans *trans;
        int err;
 
        err = nla_parse_nested(nla, NFTA_SET_ELEM_MAX, attr,
@@ -2867,7 +3156,12 @@ static int nft_del_setelem(const struct nft_ctx *ctx, struct nft_set *set,
        if (err < 0)
                goto err2;
 
-       set->ops->remove(set, &elem);
+       trans = nft_trans_elem_alloc(ctx, NFT_MSG_DELSETELEM, set);
+       if (trans == NULL)
+               goto err2;
+
+       nft_trans_elem(trans) = elem;
+       list_add(&trans->list, &ctx->net->nft.commit_list);
 
        nft_data_uninit(&elem.key, NFT_DATA_VALUE);
        if (set->flags & NFT_SET_MAP)
@@ -2886,9 +3180,9 @@ static int nf_tables_delsetelem(struct sock *nlsk, struct sk_buff *skb,
        const struct nlattr *attr;
        struct nft_set *set;
        struct nft_ctx ctx;
-       int rem, err;
+       int rem, err = 0;
 
-       err = nft_ctx_init_from_elemattr(&ctx, skb, nlh, nla);
+       err = nft_ctx_init_from_elemattr(&ctx, skb, nlh, nla, false);
        if (err < 0)
                return err;
 
@@ -2901,14 +3195,14 @@ static int nf_tables_delsetelem(struct sock *nlsk, struct sk_buff *skb,
        nla_for_each_nested(attr, nla[NFTA_SET_ELEM_LIST_ELEMENTS], rem) {
                err = nft_del_setelem(&ctx, set, attr);
                if (err < 0)
-                       return err;
+                       break;
        }
-       return 0;
+       return err;
 }
 
 static const struct nfnl_callback nf_tables_cb[NFT_MSG_MAX] = {
        [NFT_MSG_NEWTABLE] = {
-               .call           = nf_tables_newtable,
+               .call_batch     = nf_tables_newtable,
                .attr_count     = NFTA_TABLE_MAX,
                .policy         = nft_table_policy,
        },
@@ -2918,12 +3212,12 @@ static const struct nfnl_callback nf_tables_cb[NFT_MSG_MAX] = {
                .policy         = nft_table_policy,
        },
        [NFT_MSG_DELTABLE] = {
-               .call           = nf_tables_deltable,
+               .call_batch     = nf_tables_deltable,
                .attr_count     = NFTA_TABLE_MAX,
                .policy         = nft_table_policy,
        },
        [NFT_MSG_NEWCHAIN] = {
-               .call           = nf_tables_newchain,
+               .call_batch     = nf_tables_newchain,
                .attr_count     = NFTA_CHAIN_MAX,
                .policy         = nft_chain_policy,
        },
@@ -2933,7 +3227,7 @@ static const struct nfnl_callback nf_tables_cb[NFT_MSG_MAX] = {
                .policy         = nft_chain_policy,
        },
        [NFT_MSG_DELCHAIN] = {
-               .call           = nf_tables_delchain,
+               .call_batch     = nf_tables_delchain,
                .attr_count     = NFTA_CHAIN_MAX,
                .policy         = nft_chain_policy,
        },
@@ -2953,7 +3247,7 @@ static const struct nfnl_callback nf_tables_cb[NFT_MSG_MAX] = {
                .policy         = nft_rule_policy,
        },
        [NFT_MSG_NEWSET] = {
-               .call           = nf_tables_newset,
+               .call_batch     = nf_tables_newset,
                .attr_count     = NFTA_SET_MAX,
                .policy         = nft_set_policy,
        },
@@ -2963,12 +3257,12 @@ static const struct nfnl_callback nf_tables_cb[NFT_MSG_MAX] = {
                .policy         = nft_set_policy,
        },
        [NFT_MSG_DELSET] = {
-               .call           = nf_tables_delset,
+               .call_batch     = nf_tables_delset,
                .attr_count     = NFTA_SET_MAX,
                .policy         = nft_set_policy,
        },
        [NFT_MSG_NEWSETELEM] = {
-               .call           = nf_tables_newsetelem,
+               .call_batch     = nf_tables_newsetelem,
                .attr_count     = NFTA_SET_ELEM_LIST_MAX,
                .policy         = nft_set_elem_list_policy,
        },
@@ -2978,12 +3272,270 @@ static const struct nfnl_callback nf_tables_cb[NFT_MSG_MAX] = {
                .policy         = nft_set_elem_list_policy,
        },
        [NFT_MSG_DELSETELEM] = {
-               .call           = nf_tables_delsetelem,
+               .call_batch     = nf_tables_delsetelem,
                .attr_count     = NFTA_SET_ELEM_LIST_MAX,
                .policy         = nft_set_elem_list_policy,
        },
 };
 
+static void nft_chain_commit_update(struct nft_trans *trans)
+{
+       struct nft_base_chain *basechain;
+
+       if (nft_trans_chain_name(trans)[0])
+               strcpy(trans->ctx.chain->name, nft_trans_chain_name(trans));
+
+       if (!(trans->ctx.chain->flags & NFT_BASE_CHAIN))
+               return;
+
+       basechain = nft_base_chain(trans->ctx.chain);
+       nft_chain_stats_replace(basechain, nft_trans_chain_stats(trans));
+
+       switch (nft_trans_chain_policy(trans)) {
+       case NF_DROP:
+       case NF_ACCEPT:
+               basechain->policy = nft_trans_chain_policy(trans);
+               break;
+       }
+}
+
+/* Schedule objects for release via rcu to make sure no packets are accessing
+ * removed rules.
+ */
+static void nf_tables_commit_release_rcu(struct rcu_head *rt)
+{
+       struct nft_trans *trans = container_of(rt, struct nft_trans, rcu_head);
+
+       switch (trans->msg_type) {
+       case NFT_MSG_DELTABLE:
+               nf_tables_table_destroy(&trans->ctx);
+               break;
+       case NFT_MSG_DELCHAIN:
+               nf_tables_chain_destroy(trans->ctx.chain);
+               break;
+       case NFT_MSG_DELRULE:
+               nf_tables_rule_destroy(&trans->ctx, nft_trans_rule(trans));
+               break;
+       case NFT_MSG_DELSET:
+               nft_set_destroy(nft_trans_set(trans));
+               break;
+       }
+       kfree(trans);
+}
+
+static int nf_tables_commit(struct sk_buff *skb)
+{
+       struct net *net = sock_net(skb->sk);
+       struct nft_trans *trans, *next;
+       struct nft_set *set;
+
+       /* Bump generation counter, invalidate any dump in progress */
+       net->nft.genctr++;
+
+       /* A new generation has just started */
+       net->nft.gencursor = gencursor_next(net);
+
+       /* Make sure all packets have left the previous generation before
+        * purging old rules.
+        */
+       synchronize_rcu();
+
+       list_for_each_entry_safe(trans, next, &net->nft.commit_list, list) {
+               switch (trans->msg_type) {
+               case NFT_MSG_NEWTABLE:
+                       if (nft_trans_table_update(trans)) {
+                               if (!nft_trans_table_enable(trans)) {
+                                       nf_tables_table_disable(trans->ctx.afi,
+                                                               trans->ctx.table);
+                                       trans->ctx.table->flags |= NFT_TABLE_F_DORMANT;
+                               }
+                       } else {
+                               trans->ctx.table->flags &= ~NFT_TABLE_INACTIVE;
+                       }
+                       nf_tables_table_notify(&trans->ctx, NFT_MSG_NEWTABLE);
+                       nft_trans_destroy(trans);
+                       break;
+               case NFT_MSG_DELTABLE:
+                       nf_tables_table_notify(&trans->ctx, NFT_MSG_DELTABLE);
+                       break;
+               case NFT_MSG_NEWCHAIN:
+                       if (nft_trans_chain_update(trans))
+                               nft_chain_commit_update(trans);
+                       else {
+                               trans->ctx.chain->flags &= ~NFT_CHAIN_INACTIVE;
+                               trans->ctx.table->use++;
+                       }
+                       nf_tables_chain_notify(&trans->ctx, NFT_MSG_NEWCHAIN);
+                       nft_trans_destroy(trans);
+                       break;
+               case NFT_MSG_DELCHAIN:
+                       trans->ctx.table->use--;
+                       nf_tables_chain_notify(&trans->ctx, NFT_MSG_DELCHAIN);
+                       if (!(trans->ctx.table->flags & NFT_TABLE_F_DORMANT) &&
+                           trans->ctx.chain->flags & NFT_BASE_CHAIN) {
+                               nf_unregister_hooks(nft_base_chain(trans->ctx.chain)->ops,
+                                                   trans->ctx.afi->nops);
+                       }
+                       break;
+               case NFT_MSG_NEWRULE:
+                       nft_rule_clear(trans->ctx.net, nft_trans_rule(trans));
+                       nf_tables_rule_notify(&trans->ctx,
+                                             nft_trans_rule(trans),
+                                             NFT_MSG_NEWRULE);
+                       nft_trans_destroy(trans);
+                       break;
+               case NFT_MSG_DELRULE:
+                       list_del_rcu(&nft_trans_rule(trans)->list);
+                       nf_tables_rule_notify(&trans->ctx,
+                                             nft_trans_rule(trans),
+                                             NFT_MSG_DELRULE);
+                       break;
+               case NFT_MSG_NEWSET:
+                       nft_trans_set(trans)->flags &= ~NFT_SET_INACTIVE;
+                       nf_tables_set_notify(&trans->ctx, nft_trans_set(trans),
+                                            NFT_MSG_NEWSET);
+                       nft_trans_destroy(trans);
+                       break;
+               case NFT_MSG_DELSET:
+                       nf_tables_set_notify(&trans->ctx, nft_trans_set(trans),
+                                            NFT_MSG_DELSET);
+                       break;
+               case NFT_MSG_NEWSETELEM:
+                       nft_trans_elem_set(trans)->nelems++;
+                       nf_tables_setelem_notify(&trans->ctx,
+                                                nft_trans_elem_set(trans),
+                                                &nft_trans_elem(trans),
+                                                NFT_MSG_NEWSETELEM, 0);
+                       nft_trans_destroy(trans);
+                       break;
+               case NFT_MSG_DELSETELEM:
+                       nft_trans_elem_set(trans)->nelems--;
+                       nf_tables_setelem_notify(&trans->ctx,
+                                                nft_trans_elem_set(trans),
+                                                &nft_trans_elem(trans),
+                                                NFT_MSG_DELSETELEM, 0);
+                       set = nft_trans_elem_set(trans);
+                       set->ops->get(set, &nft_trans_elem(trans));
+                       set->ops->remove(set, &nft_trans_elem(trans));
+                       nft_trans_destroy(trans);
+                       break;
+               }
+       }
+
+       list_for_each_entry_safe(trans, next, &net->nft.commit_list, list) {
+               list_del(&trans->list);
+               trans->ctx.nla = NULL;
+               call_rcu(&trans->rcu_head, nf_tables_commit_release_rcu);
+       }
+
+       return 0;
+}
+
+/* Schedule objects for release via rcu to make sure no packets are accessing
+ * aborted rules.
+ */
+static void nf_tables_abort_release_rcu(struct rcu_head *rt)
+{
+       struct nft_trans *trans = container_of(rt, struct nft_trans, rcu_head);
+
+       switch (trans->msg_type) {
+       case NFT_MSG_NEWTABLE:
+               nf_tables_table_destroy(&trans->ctx);
+               break;
+       case NFT_MSG_NEWCHAIN:
+               nf_tables_chain_destroy(trans->ctx.chain);
+               break;
+       case NFT_MSG_NEWRULE:
+               nf_tables_rule_destroy(&trans->ctx, nft_trans_rule(trans));
+               break;
+       case NFT_MSG_NEWSET:
+               nft_set_destroy(nft_trans_set(trans));
+               break;
+       }
+       kfree(trans);
+}
+
+static int nf_tables_abort(struct sk_buff *skb)
+{
+       struct net *net = sock_net(skb->sk);
+       struct nft_trans *trans, *next;
+       struct nft_set *set;
+
+       list_for_each_entry_safe(trans, next, &net->nft.commit_list, list) {
+               switch (trans->msg_type) {
+               case NFT_MSG_NEWTABLE:
+                       if (nft_trans_table_update(trans)) {
+                               if (nft_trans_table_enable(trans)) {
+                                       nf_tables_table_disable(trans->ctx.afi,
+                                                               trans->ctx.table);
+                                       trans->ctx.table->flags |= NFT_TABLE_F_DORMANT;
+                               }
+                               nft_trans_destroy(trans);
+                       } else {
+                               list_del(&trans->ctx.table->list);
+                       }
+                       break;
+               case NFT_MSG_DELTABLE:
+                       list_add_tail(&trans->ctx.table->list,
+                                     &trans->ctx.afi->tables);
+                       nft_trans_destroy(trans);
+                       break;
+               case NFT_MSG_NEWCHAIN:
+                       if (nft_trans_chain_update(trans)) {
+                               if (nft_trans_chain_stats(trans))
+                                       free_percpu(nft_trans_chain_stats(trans));
+
+                               nft_trans_destroy(trans);
+                       } else {
+                               list_del(&trans->ctx.chain->list);
+                               if (!(trans->ctx.table->flags & NFT_TABLE_F_DORMANT) &&
+                                   trans->ctx.chain->flags & NFT_BASE_CHAIN) {
+                                       nf_unregister_hooks(nft_base_chain(trans->ctx.chain)->ops,
+                                                           trans->ctx.afi->nops);
+                               }
+                       }
+                       break;
+               case NFT_MSG_DELCHAIN:
+                       list_add_tail(&trans->ctx.chain->list,
+                                     &trans->ctx.table->chains);
+                       nft_trans_destroy(trans);
+                       break;
+               case NFT_MSG_NEWRULE:
+                       list_del_rcu(&nft_trans_rule(trans)->list);
+                       break;
+               case NFT_MSG_DELRULE:
+                       nft_rule_clear(trans->ctx.net, nft_trans_rule(trans));
+                       nft_trans_destroy(trans);
+                       break;
+               case NFT_MSG_NEWSET:
+                       list_del(&nft_trans_set(trans)->list);
+                       break;
+               case NFT_MSG_DELSET:
+                       list_add_tail(&nft_trans_set(trans)->list,
+                                     &trans->ctx.table->sets);
+                       nft_trans_destroy(trans);
+                       break;
+               case NFT_MSG_NEWSETELEM:
+                       set = nft_trans_elem_set(trans);
+                       set->ops->get(set, &nft_trans_elem(trans));
+                       set->ops->remove(set, &nft_trans_elem(trans));
+                       nft_trans_destroy(trans);
+                       break;
+               case NFT_MSG_DELSETELEM:
+                       nft_trans_destroy(trans);
+                       break;
+               }
+       }
+
+       list_for_each_entry_safe(trans, next, &net->nft.commit_list, list) {
+               list_del(&trans->list);
+               trans->ctx.nla = NULL;
+               call_rcu(&trans->rcu_head, nf_tables_abort_release_rcu);
+       }
+
+       return 0;
+}
+
 static const struct nfnetlink_subsystem nf_tables_subsys = {
        .name           = "nf_tables",
        .subsys_id      = NFNL_SUBSYS_NFTABLES,
index e8138da4c14f70f40449c72ec4dc4d31f2960b8e..c138b8fbe280af6886693421a7fe8d9a288156cf 100644 (file)
@@ -256,15 +256,15 @@ replay:
 #endif
                {
                        nfnl_unlock(subsys_id);
-                       kfree_skb(nskb);
-                       return netlink_ack(skb, nlh, -EOPNOTSUPP);
+                       netlink_ack(skb, nlh, -EOPNOTSUPP);
+                       return kfree_skb(nskb);
                }
        }
 
        if (!ss->commit || !ss->abort) {
                nfnl_unlock(subsys_id);
-               kfree_skb(nskb);
-               return netlink_ack(skb, nlh, -EOPNOTSUPP);
+               netlink_ack(skb, nlh, -EOPNOTSUPP);
+               return kfree_skb(skb);
        }
 
        while (skb->len >= nlmsg_total_size(0)) {
@@ -368,14 +368,13 @@ done:
 static void nfnetlink_rcv(struct sk_buff *skb)
 {
        struct nlmsghdr *nlh = nlmsg_hdr(skb);
-       struct net *net = sock_net(skb->sk);
        int msglen;
 
        if (nlh->nlmsg_len < NLMSG_HDRLEN ||
            skb->len < nlh->nlmsg_len)
                return;
 
-       if (!ns_capable(net->user_ns, CAP_NET_ADMIN)) {
+       if (!netlink_net_capable(skb, CAP_NET_ADMIN)) {
                netlink_ack(skb, nlh, -EPERM);
                return;
        }
@@ -400,19 +399,17 @@ static void nfnetlink_rcv(struct sk_buff *skb)
 }
 
 #ifdef CONFIG_MODULES
-static void nfnetlink_bind(int group)
+static int nfnetlink_bind(int group)
 {
        const struct nfnetlink_subsystem *ss;
        int type = nfnl_group2type[group];
 
        rcu_read_lock();
        ss = nfnetlink_get_subsys(type);
-       if (!ss) {
-               rcu_read_unlock();
-               request_module("nfnetlink-subsys-%d", type);
-               return;
-       }
        rcu_read_unlock();
+       if (!ss)
+               request_module("nfnetlink-subsys-%d", type);
+       return 0;
 }
 #endif
 
index bd0d41e693416167b4f149f64117e440a5134496..cc5603016242ea8e1f5cdce1d633e3a2687276ae 100644 (file)
@@ -215,22 +215,14 @@ static void nft_ct_l3proto_module_put(uint8_t family)
                nf_ct_l3proto_module_put(family);
 }
 
-static int nft_ct_init_validate_get(const struct nft_expr *expr,
-                                   const struct nlattr * const tb[])
+static int nft_ct_get_init(const struct nft_ctx *ctx,
+                          const struct nft_expr *expr,
+                          const struct nlattr * const tb[])
 {
        struct nft_ct *priv = nft_expr_priv(expr);
+       int err;
 
-       if (tb[NFTA_CT_DIRECTION] != NULL) {
-               priv->dir = nla_get_u8(tb[NFTA_CT_DIRECTION]);
-               switch (priv->dir) {
-               case IP_CT_DIR_ORIGINAL:
-               case IP_CT_DIR_REPLY:
-                       break;
-               default:
-                       return -EINVAL;
-               }
-       }
-
+       priv->key = ntohl(nla_get_be32(tb[NFTA_CT_KEY]));
        switch (priv->key) {
        case NFT_CT_STATE:
        case NFT_CT_DIRECTION:
@@ -262,55 +254,55 @@ static int nft_ct_init_validate_get(const struct nft_expr *expr,
                return -EOPNOTSUPP;
        }
 
-       return 0;
-}
-
-static int nft_ct_init_validate_set(uint32_t key)
-{
-       switch (key) {
-       case NFT_CT_MARK:
-               break;
-       default:
-               return -EOPNOTSUPP;
+       if (tb[NFTA_CT_DIRECTION] != NULL) {
+               priv->dir = nla_get_u8(tb[NFTA_CT_DIRECTION]);
+               switch (priv->dir) {
+               case IP_CT_DIR_ORIGINAL:
+               case IP_CT_DIR_REPLY:
+                       break;
+               default:
+                       return -EINVAL;
+               }
        }
 
+       priv->dreg = ntohl(nla_get_be32(tb[NFTA_CT_DREG]));
+       err = nft_validate_output_register(priv->dreg);
+       if (err < 0)
+               return err;
+
+       err = nft_validate_data_load(ctx, priv->dreg, NULL, NFT_DATA_VALUE);
+       if (err < 0)
+               return err;
+
+       err = nft_ct_l3proto_try_module_get(ctx->afi->family);
+       if (err < 0)
+               return err;
+
        return 0;
 }
 
-static int nft_ct_init(const struct nft_ctx *ctx,
-                      const struct nft_expr *expr,
-                      const struct nlattr * const tb[])
+static int nft_ct_set_init(const struct nft_ctx *ctx,
+                          const struct nft_expr *expr,
+                          const struct nlattr * const tb[])
 {
        struct nft_ct *priv = nft_expr_priv(expr);
        int err;
 
        priv->key = ntohl(nla_get_be32(tb[NFTA_CT_KEY]));
-
-       if (tb[NFTA_CT_DREG]) {
-               err = nft_ct_init_validate_get(expr, tb);
-               if (err < 0)
-                       return err;
-
-               priv->dreg = ntohl(nla_get_be32(tb[NFTA_CT_DREG]));
-               err = nft_validate_output_register(priv->dreg);
-               if (err < 0)
-                       return err;
-
-               err = nft_validate_data_load(ctx, priv->dreg, NULL,
-                                            NFT_DATA_VALUE);
-               if (err < 0)
-                       return err;
-       } else {
-               err = nft_ct_init_validate_set(priv->key);
-               if (err < 0)
-                       return err;
-
-               priv->sreg = ntohl(nla_get_be32(tb[NFTA_CT_SREG]));
-               err = nft_validate_input_register(priv->sreg);
-               if (err < 0)
-                       return err;
+       switch (priv->key) {
+#ifdef CONFIG_NF_CONNTRACK_MARK
+       case NFT_CT_MARK:
+               break;
+#endif
+       default:
+               return -EOPNOTSUPP;
        }
 
+       priv->sreg = ntohl(nla_get_be32(tb[NFTA_CT_SREG]));
+       err = nft_validate_input_register(priv->sreg);
+       if (err < 0)
+               return err;
+
        err = nft_ct_l3proto_try_module_get(ctx->afi->family);
        if (err < 0)
                return err;
@@ -370,7 +362,7 @@ static const struct nft_expr_ops nft_ct_get_ops = {
        .type           = &nft_ct_type,
        .size           = NFT_EXPR_SIZE(sizeof(struct nft_ct)),
        .eval           = nft_ct_get_eval,
-       .init           = nft_ct_init,
+       .init           = nft_ct_get_init,
        .destroy        = nft_ct_destroy,
        .dump           = nft_ct_get_dump,
 };
@@ -379,7 +371,7 @@ static const struct nft_expr_ops nft_ct_set_ops = {
        .type           = &nft_ct_type,
        .size           = NFT_EXPR_SIZE(sizeof(struct nft_ct)),
        .eval           = nft_ct_set_eval,
-       .init           = nft_ct_init,
+       .init           = nft_ct_set_init,
        .destroy        = nft_ct_destroy,
        .dump           = nft_ct_set_dump,
 };
index 3b1ad876d6b028f987ccf8e78ceb9639d767e6bf..1dfeb6786832e83670968f577672e364baa7f039 100644 (file)
@@ -12,6 +12,7 @@
 #include <linux/init.h>
 #include <linux/module.h>
 #include <linux/list.h>
+#include <linux/log2.h>
 #include <linux/jhash.h>
 #include <linux/netlink.h>
 #include <linux/vmalloc.h>
@@ -19,7 +20,7 @@
 #include <linux/netfilter/nf_tables.h>
 #include <net/netfilter/nf_tables.h>
 
-#define NFT_HASH_MIN_SIZE      4
+#define NFT_HASH_MIN_SIZE      4UL
 
 struct nft_hash {
        struct nft_hash_table __rcu     *tbl;
@@ -27,7 +28,6 @@ struct nft_hash {
 
 struct nft_hash_table {
        unsigned int                    size;
-       unsigned int                    elements;
        struct nft_hash_elem __rcu      *buckets[];
 };
 
@@ -82,6 +82,11 @@ static void nft_hash_tbl_free(const struct nft_hash_table *tbl)
                kfree(tbl);
 }
 
+static unsigned int nft_hash_tbl_size(unsigned int nelem)
+{
+       return max(roundup_pow_of_two(nelem * 4 / 3), NFT_HASH_MIN_SIZE);
+}
+
 static struct nft_hash_table *nft_hash_tbl_alloc(unsigned int nbuckets)
 {
        struct nft_hash_table *tbl;
@@ -161,7 +166,6 @@ static int nft_hash_tbl_expand(const struct nft_set *set, struct nft_hash *priv)
                        break;
                }
        }
-       ntbl->elements = tbl->elements;
 
        /* Publish new table */
        rcu_assign_pointer(priv->tbl, ntbl);
@@ -201,7 +205,6 @@ static int nft_hash_tbl_shrink(const struct nft_set *set, struct nft_hash *priv)
                        ;
                RCU_INIT_POINTER(*pprev, tbl->buckets[i + ntbl->size]);
        }
-       ntbl->elements = tbl->elements;
 
        /* Publish new table */
        rcu_assign_pointer(priv->tbl, ntbl);
@@ -237,10 +240,9 @@ static int nft_hash_insert(const struct nft_set *set,
        h = nft_hash_data(&he->key, tbl->size, set->klen);
        RCU_INIT_POINTER(he->next, tbl->buckets[h]);
        rcu_assign_pointer(tbl->buckets[h], he);
-       tbl->elements++;
 
        /* Expand table when exceeding 75% load */
-       if (tbl->elements > tbl->size / 4 * 3)
+       if (set->nelems + 1 > tbl->size / 4 * 3)
                nft_hash_tbl_expand(set, priv);
 
        return 0;
@@ -268,10 +270,9 @@ static void nft_hash_remove(const struct nft_set *set,
        RCU_INIT_POINTER(*pprev, he->next);
        synchronize_rcu();
        kfree(he);
-       tbl->elements--;
 
        /* Shrink table beneath 30% load */
-       if (tbl->elements < tbl->size * 3 / 10 &&
+       if (set->nelems - 1 < tbl->size * 3 / 10 &&
            tbl->size > NFT_HASH_MIN_SIZE)
                nft_hash_tbl_shrink(set, priv);
 }
@@ -335,17 +336,23 @@ static unsigned int nft_hash_privsize(const struct nlattr * const nla[])
 }
 
 static int nft_hash_init(const struct nft_set *set,
+                        const struct nft_set_desc *desc,
                         const struct nlattr * const tb[])
 {
        struct nft_hash *priv = nft_set_priv(set);
        struct nft_hash_table *tbl;
+       unsigned int size;
 
        if (unlikely(!nft_hash_rnd_initted)) {
                get_random_bytes(&nft_hash_rnd, 4);
                nft_hash_rnd_initted = true;
        }
 
-       tbl = nft_hash_tbl_alloc(NFT_HASH_MIN_SIZE);
+       size = NFT_HASH_MIN_SIZE;
+       if (desc->size)
+               size = nft_hash_tbl_size(desc->size);
+
+       tbl = nft_hash_tbl_alloc(size);
        if (tbl == NULL)
                return -ENOMEM;
        RCU_INIT_POINTER(priv->tbl, tbl);
@@ -369,8 +376,37 @@ static void nft_hash_destroy(const struct nft_set *set)
        kfree(tbl);
 }
 
+static bool nft_hash_estimate(const struct nft_set_desc *desc, u32 features,
+                             struct nft_set_estimate *est)
+{
+       unsigned int esize;
+
+       esize = sizeof(struct nft_hash_elem);
+       if (features & NFT_SET_MAP)
+               esize += FIELD_SIZEOF(struct nft_hash_elem, data[0]);
+
+       if (desc->size) {
+               est->size = sizeof(struct nft_hash) +
+                           nft_hash_tbl_size(desc->size) *
+                           sizeof(struct nft_hash_elem *) +
+                           desc->size * esize;
+       } else {
+               /* Resizing happens when the load drops below 30% or goes
+                * above 75%. The average of 52.5% load (approximated by 50%)
+                * is used for the size estimation of the hash buckets,
+                * meaning we calculate two buckets per element.
+                */
+               est->size = esize + 2 * sizeof(struct nft_hash_elem *);
+       }
+
+       est->class = NFT_SET_CLASS_O_1;
+
+       return true;
+}
+
 static struct nft_set_ops nft_hash_ops __read_mostly = {
        .privsize       = nft_hash_privsize,
+       .estimate       = nft_hash_estimate,
        .init           = nft_hash_init,
        .destroy        = nft_hash_destroy,
        .get            = nft_hash_get,
index 7fd2bea8aa239f347dc461c7bc45869dac405573..6404a726d17b78fc6db6f411216195e68db63950 100644 (file)
@@ -56,8 +56,14 @@ static int nft_lookup_init(const struct nft_ctx *ctx,
                return -EINVAL;
 
        set = nf_tables_set_lookup(ctx->table, tb[NFTA_LOOKUP_SET]);
-       if (IS_ERR(set))
-               return PTR_ERR(set);
+       if (IS_ERR(set)) {
+               if (tb[NFTA_LOOKUP_SET_ID]) {
+                       set = nf_tables_set_lookup_byid(ctx->net,
+                                                       tb[NFTA_LOOKUP_SET_ID]);
+               }
+               if (IS_ERR(set))
+                       return PTR_ERR(set);
+       }
 
        priv->sreg = ntohl(nla_get_be32(tb[NFTA_LOOKUP_SREG]));
        err = nft_validate_input_register(priv->sreg);
index 425cf39af8907f1d0618b0904121073ea4dc116a..852b178c6ae7fa2f7dbd6f7887404033da45971f 100644 (file)
 #include <net/sock.h>
 #include <net/tcp_states.h> /* for TCP_TIME_WAIT */
 #include <net/netfilter/nf_tables.h>
+#include <net/netfilter/nft_meta.h>
 
-struct nft_meta {
-       enum nft_meta_keys      key:8;
-       union {
-               enum nft_registers      dreg:8;
-               enum nft_registers      sreg:8;
-       };
-};
-
-static void nft_meta_get_eval(const struct nft_expr *expr,
-                             struct nft_data data[NFT_REG_MAX + 1],
-                             const struct nft_pktinfo *pkt)
+void nft_meta_get_eval(const struct nft_expr *expr,
+                      struct nft_data data[NFT_REG_MAX + 1],
+                      const struct nft_pktinfo *pkt)
 {
        const struct nft_meta *priv = nft_expr_priv(expr);
        const struct sk_buff *skb = pkt->skb;
@@ -140,10 +133,11 @@ static void nft_meta_get_eval(const struct nft_expr *expr,
 err:
        data[NFT_REG_VERDICT].verdict = NFT_BREAK;
 }
+EXPORT_SYMBOL_GPL(nft_meta_get_eval);
 
-static void nft_meta_set_eval(const struct nft_expr *expr,
-                             struct nft_data data[NFT_REG_MAX + 1],
-                             const struct nft_pktinfo *pkt)
+void nft_meta_set_eval(const struct nft_expr *expr,
+                      struct nft_data data[NFT_REG_MAX + 1],
+                      const struct nft_pktinfo *pkt)
 {
        const struct nft_meta *meta = nft_expr_priv(expr);
        struct sk_buff *skb = pkt->skb;
@@ -163,28 +157,24 @@ static void nft_meta_set_eval(const struct nft_expr *expr,
                WARN_ON(1);
        }
 }
+EXPORT_SYMBOL_GPL(nft_meta_set_eval);
 
-static const struct nla_policy nft_meta_policy[NFTA_META_MAX + 1] = {
+const struct nla_policy nft_meta_policy[NFTA_META_MAX + 1] = {
        [NFTA_META_DREG]        = { .type = NLA_U32 },
        [NFTA_META_KEY]         = { .type = NLA_U32 },
        [NFTA_META_SREG]        = { .type = NLA_U32 },
 };
+EXPORT_SYMBOL_GPL(nft_meta_policy);
 
-static int nft_meta_init_validate_set(uint32_t key)
+int nft_meta_get_init(const struct nft_ctx *ctx,
+                     const struct nft_expr *expr,
+                     const struct nlattr * const tb[])
 {
-       switch (key) {
-       case NFT_META_MARK:
-       case NFT_META_PRIORITY:
-       case NFT_META_NFTRACE:
-               return 0;
-       default:
-               return -EOPNOTSUPP;
-       }
-}
+       struct nft_meta *priv = nft_expr_priv(expr);
+       int err;
 
-static int nft_meta_init_validate_get(uint32_t key)
-{
-       switch (key) {
+       priv->key = ntohl(nla_get_be32(tb[NFTA_META_KEY]));
+       switch (priv->key) {
        case NFT_META_LEN:
        case NFT_META_PROTOCOL:
        case NFT_META_NFPROTO:
@@ -205,39 +195,41 @@ static int nft_meta_init_validate_get(uint32_t key)
 #ifdef CONFIG_NETWORK_SECMARK
        case NFT_META_SECMARK:
 #endif
-               return 0;
+               break;
        default:
                return -EOPNOTSUPP;
        }
 
+       priv->dreg = ntohl(nla_get_be32(tb[NFTA_META_DREG]));
+       err = nft_validate_output_register(priv->dreg);
+       if (err < 0)
+               return err;
+
+       err = nft_validate_data_load(ctx, priv->dreg, NULL, NFT_DATA_VALUE);
+       if (err < 0)
+               return err;
+
+       return 0;
 }
+EXPORT_SYMBOL_GPL(nft_meta_get_init);
 
-static int nft_meta_init(const struct nft_ctx *ctx, const struct nft_expr *expr,
-                        const struct nlattr * const tb[])
+int nft_meta_set_init(const struct nft_ctx *ctx,
+                     const struct nft_expr *expr,
+                     const struct nlattr * const tb[])
 {
        struct nft_meta *priv = nft_expr_priv(expr);
        int err;
 
        priv->key = ntohl(nla_get_be32(tb[NFTA_META_KEY]));
-
-       if (tb[NFTA_META_DREG]) {
-               err = nft_meta_init_validate_get(priv->key);
-               if (err < 0)
-                       return err;
-
-               priv->dreg = ntohl(nla_get_be32(tb[NFTA_META_DREG]));
-               err = nft_validate_output_register(priv->dreg);
-               if (err < 0)
-                       return err;
-
-               return nft_validate_data_load(ctx, priv->dreg, NULL,
-                                             NFT_DATA_VALUE);
+       switch (priv->key) {
+       case NFT_META_MARK:
+       case NFT_META_PRIORITY:
+       case NFT_META_NFTRACE:
+               break;
+       default:
+               return -EOPNOTSUPP;
        }
 
-       err = nft_meta_init_validate_set(priv->key);
-       if (err < 0)
-               return err;
-
        priv->sreg = ntohl(nla_get_be32(tb[NFTA_META_SREG]));
        err = nft_validate_input_register(priv->sreg);
        if (err < 0)
@@ -245,9 +237,10 @@ static int nft_meta_init(const struct nft_ctx *ctx, const struct nft_expr *expr,
 
        return 0;
 }
+EXPORT_SYMBOL_GPL(nft_meta_set_init);
 
-static int nft_meta_get_dump(struct sk_buff *skb,
-                            const struct nft_expr *expr)
+int nft_meta_get_dump(struct sk_buff *skb,
+                     const struct nft_expr *expr)
 {
        const struct nft_meta *priv = nft_expr_priv(expr);
 
@@ -260,9 +253,10 @@ static int nft_meta_get_dump(struct sk_buff *skb,
 nla_put_failure:
        return -1;
 }
+EXPORT_SYMBOL_GPL(nft_meta_get_dump);
 
-static int nft_meta_set_dump(struct sk_buff *skb,
-                            const struct nft_expr *expr)
+int nft_meta_set_dump(struct sk_buff *skb,
+                     const struct nft_expr *expr)
 {
        const struct nft_meta *priv = nft_expr_priv(expr);
 
@@ -276,13 +270,14 @@ static int nft_meta_set_dump(struct sk_buff *skb,
 nla_put_failure:
        return -1;
 }
+EXPORT_SYMBOL_GPL(nft_meta_set_dump);
 
 static struct nft_expr_type nft_meta_type;
 static const struct nft_expr_ops nft_meta_get_ops = {
        .type           = &nft_meta_type,
        .size           = NFT_EXPR_SIZE(sizeof(struct nft_meta)),
        .eval           = nft_meta_get_eval,
-       .init           = nft_meta_init,
+       .init           = nft_meta_get_init,
        .dump           = nft_meta_get_dump,
 };
 
@@ -290,7 +285,7 @@ static const struct nft_expr_ops nft_meta_set_ops = {
        .type           = &nft_meta_type,
        .size           = NFT_EXPR_SIZE(sizeof(struct nft_meta)),
        .eval           = nft_meta_set_eval,
-       .init           = nft_meta_init,
+       .init           = nft_meta_set_init,
        .dump           = nft_meta_set_dump,
 };
 
index e21d69d13506b95946820f24641fe7e48d885866..072e611e9f712299b0ac049d2ca4d94c1b85cd86 100644 (file)
@@ -201,6 +201,7 @@ static unsigned int nft_rbtree_privsize(const struct nlattr * const nla[])
 }
 
 static int nft_rbtree_init(const struct nft_set *set,
+                          const struct nft_set_desc *desc,
                           const struct nlattr * const nla[])
 {
        struct nft_rbtree *priv = nft_set_priv(set);
@@ -222,8 +223,28 @@ static void nft_rbtree_destroy(const struct nft_set *set)
        }
 }
 
+static bool nft_rbtree_estimate(const struct nft_set_desc *desc, u32 features,
+                               struct nft_set_estimate *est)
+{
+       unsigned int nsize;
+
+       nsize = sizeof(struct nft_rbtree_elem);
+       if (features & NFT_SET_MAP)
+               nsize += FIELD_SIZEOF(struct nft_rbtree_elem, data[0]);
+
+       if (desc->size)
+               est->size = sizeof(struct nft_rbtree) + desc->size * nsize;
+       else
+               est->size = nsize;
+
+       est->class = NFT_SET_CLASS_O_LOG_N;
+
+       return true;
+}
+
 static struct nft_set_ops nft_rbtree_ops __read_mostly = {
        .privsize       = nft_rbtree_privsize,
+       .estimate       = nft_rbtree_estimate,
        .init           = nft_rbtree_init,
        .destroy        = nft_rbtree_destroy,
        .insert         = nft_rbtree_insert,
index 894cda0206bb9b8a32488ec81959482a7cc582d5..e0ccd84d4d6781ab761349e0ac913f6ddc3e8994 100644 (file)
@@ -1206,7 +1206,8 @@ static int netlink_create(struct net *net, struct socket *sock, int protocol,
        struct module *module = NULL;
        struct mutex *cb_mutex;
        struct netlink_sock *nlk;
-       void (*bind)(int group);
+       int (*bind)(int group);
+       void (*unbind)(int group);
        int err = 0;
 
        sock->state = SS_UNCONNECTED;
@@ -1232,6 +1233,7 @@ static int netlink_create(struct net *net, struct socket *sock, int protocol,
                err = -EPROTONOSUPPORT;
        cb_mutex = nl_table[protocol].cb_mutex;
        bind = nl_table[protocol].bind;
+       unbind = nl_table[protocol].unbind;
        netlink_unlock_table();
 
        if (err < 0)
@@ -1248,6 +1250,7 @@ static int netlink_create(struct net *net, struct socket *sock, int protocol,
        nlk = nlk_sk(sock->sk);
        nlk->module = module;
        nlk->netlink_bind = bind;
+       nlk->netlink_unbind = unbind;
 out:
        return err;
 
@@ -1301,6 +1304,7 @@ static int netlink_release(struct socket *sock)
                        kfree_rcu(old, rcu);
                        nl_table[sk->sk_protocol].module = NULL;
                        nl_table[sk->sk_protocol].bind = NULL;
+                       nl_table[sk->sk_protocol].unbind = NULL;
                        nl_table[sk->sk_protocol].flags = 0;
                        nl_table[sk->sk_protocol].registered = 0;
                }
@@ -1360,7 +1364,72 @@ retry:
        return err;
 }
 
-static inline int netlink_capable(const struct socket *sock, unsigned int flag)
+/**
+ * __netlink_ns_capable - General netlink message capability test
+ * @nsp: NETLINK_CB of the socket buffer holding a netlink command from userspace.
+ * @user_ns: The user namespace of the capability to use
+ * @cap: The capability to use
+ *
+ * Test to see if the opener of the socket we received the message
+ * from had when the netlink socket was created and the sender of the
+ * message has has the capability @cap in the user namespace @user_ns.
+ */
+bool __netlink_ns_capable(const struct netlink_skb_parms *nsp,
+                       struct user_namespace *user_ns, int cap)
+{
+       return sk_ns_capable(nsp->sk, user_ns, cap);
+}
+EXPORT_SYMBOL(__netlink_ns_capable);
+
+/**
+ * netlink_ns_capable - General netlink message capability test
+ * @skb: socket buffer holding a netlink command from userspace
+ * @user_ns: The user namespace of the capability to use
+ * @cap: The capability to use
+ *
+ * Test to see if the opener of the socket we received the message
+ * from had when the netlink socket was created and the sender of the
+ * message has has the capability @cap in the user namespace @user_ns.
+ */
+bool netlink_ns_capable(const struct sk_buff *skb,
+                       struct user_namespace *user_ns, int cap)
+{
+       return __netlink_ns_capable(&NETLINK_CB(skb), user_ns, cap);
+}
+EXPORT_SYMBOL(netlink_ns_capable);
+
+/**
+ * netlink_capable - Netlink global message capability test
+ * @skb: socket buffer holding a netlink command from userspace
+ * @cap: The capability to use
+ *
+ * Test to see if the opener of the socket we received the message
+ * from had when the netlink socket was created and the sender of the
+ * message has has the capability @cap in all user namespaces.
+ */
+bool netlink_capable(const struct sk_buff *skb, int cap)
+{
+       return netlink_ns_capable(skb, &init_user_ns, cap);
+}
+EXPORT_SYMBOL(netlink_capable);
+
+/**
+ * netlink_net_capable - Netlink network namespace message capability test
+ * @skb: socket buffer holding a netlink command from userspace
+ * @cap: The capability to use
+ *
+ * Test to see if the opener of the socket we received the message
+ * from had when the netlink socket was created and the sender of the
+ * message has has the capability @cap over the network namespace of
+ * the socket we received the message from.
+ */
+bool netlink_net_capable(const struct sk_buff *skb, int cap)
+{
+       return netlink_ns_capable(skb, sock_net(skb->sk)->user_ns, cap);
+}
+EXPORT_SYMBOL(netlink_net_capable);
+
+static inline int netlink_allowed(const struct socket *sock, unsigned int flag)
 {
        return (nl_table[sock->sk->sk_protocol].flags & flag) ||
                ns_capable(sock_net(sock->sk)->user_ns, CAP_NET_ADMIN);
@@ -1411,6 +1480,19 @@ static int netlink_realloc_groups(struct sock *sk)
        return err;
 }
 
+static void netlink_unbind(int group, long unsigned int groups,
+                          struct netlink_sock *nlk)
+{
+       int undo;
+
+       if (!nlk->netlink_unbind)
+               return;
+
+       for (undo = 0; undo < group; undo++)
+               if (test_bit(group, &groups))
+                       nlk->netlink_unbind(undo);
+}
+
 static int netlink_bind(struct socket *sock, struct sockaddr *addr,
                        int addr_len)
 {
@@ -1419,6 +1501,7 @@ static int netlink_bind(struct socket *sock, struct sockaddr *addr,
        struct netlink_sock *nlk = nlk_sk(sk);
        struct sockaddr_nl *nladdr = (struct sockaddr_nl *)addr;
        int err;
+       long unsigned int groups = nladdr->nl_groups;
 
        if (addr_len < sizeof(struct sockaddr_nl))
                return -EINVAL;
@@ -1427,45 +1510,53 @@ static int netlink_bind(struct socket *sock, struct sockaddr *addr,
                return -EINVAL;
 
        /* Only superuser is allowed to listen multicasts */
-       if (nladdr->nl_groups) {
-               if (!netlink_capable(sock, NL_CFG_F_NONROOT_RECV))
+       if (groups) {
+               if (!netlink_allowed(sock, NL_CFG_F_NONROOT_RECV))
                        return -EPERM;
                err = netlink_realloc_groups(sk);
                if (err)
                        return err;
        }
 
-       if (nlk->portid) {
+       if (nlk->portid)
                if (nladdr->nl_pid != nlk->portid)
                        return -EINVAL;
-       } else {
+
+       if (nlk->netlink_bind && groups) {
+               int group;
+
+               for (group = 0; group < nlk->ngroups; group++) {
+                       if (!test_bit(group, &groups))
+                               continue;
+                       err = nlk->netlink_bind(group);
+                       if (!err)
+                               continue;
+                       netlink_unbind(group, groups, nlk);
+                       return err;
+               }
+       }
+
+       if (!nlk->portid) {
                err = nladdr->nl_pid ?
                        netlink_insert(sk, net, nladdr->nl_pid) :
                        netlink_autobind(sock);
-               if (err)
+               if (err) {
+                       netlink_unbind(nlk->ngroups - 1, groups, nlk);
                        return err;
+               }
        }
 
-       if (!nladdr->nl_groups && (nlk->groups == NULL || !(u32)nlk->groups[0]))
+       if (!groups && (nlk->groups == NULL || !(u32)nlk->groups[0]))
                return 0;
 
        netlink_table_grab();
        netlink_update_subscriptions(sk, nlk->subscriptions +
-                                        hweight32(nladdr->nl_groups) -
+                                        hweight32(groups) -
                                         hweight32(nlk->groups[0]));
-       nlk->groups[0] = (nlk->groups[0] & ~0xffffffffUL) | nladdr->nl_groups;
+       nlk->groups[0] = (nlk->groups[0] & ~0xffffffffUL) | groups;
        netlink_update_listeners(sk);
        netlink_table_ungrab();
 
-       if (nlk->netlink_bind && nlk->groups[0]) {
-               int i;
-
-               for (i = 0; i < nlk->ngroups; i++) {
-                       if (test_bit(i, nlk->groups))
-                               nlk->netlink_bind(i);
-               }
-       }
-
        return 0;
 }
 
@@ -1490,7 +1581,7 @@ static int netlink_connect(struct socket *sock, struct sockaddr *addr,
                return -EINVAL;
 
        if ((nladdr->nl_groups || nladdr->nl_pid) &&
-           !netlink_capable(sock, NL_CFG_F_NONROOT_SEND))
+           !netlink_allowed(sock, NL_CFG_F_NONROOT_SEND))
                return -EPERM;
 
        if (!nlk->portid)
@@ -2096,20 +2187,24 @@ static int netlink_setsockopt(struct socket *sock, int level, int optname,
                break;
        case NETLINK_ADD_MEMBERSHIP:
        case NETLINK_DROP_MEMBERSHIP: {
-               if (!netlink_capable(sock, NL_CFG_F_NONROOT_RECV))
+               if (!netlink_allowed(sock, NL_CFG_F_NONROOT_RECV))
                        return -EPERM;
                err = netlink_realloc_groups(sk);
                if (err)
                        return err;
                if (!val || val - 1 >= nlk->ngroups)
                        return -EINVAL;
+               if (optname == NETLINK_ADD_MEMBERSHIP && nlk->netlink_bind) {
+                       err = nlk->netlink_bind(val);
+                       if (err)
+                               return err;
+               }
                netlink_table_grab();
                netlink_update_socket_mc(nlk, val,
                                         optname == NETLINK_ADD_MEMBERSHIP);
                netlink_table_ungrab();
-
-               if (nlk->netlink_bind)
-                       nlk->netlink_bind(val);
+               if (optname == NETLINK_DROP_MEMBERSHIP && nlk->netlink_unbind)
+                       nlk->netlink_unbind(val);
 
                err = 0;
                break;
@@ -2247,7 +2342,7 @@ static int netlink_sendmsg(struct kiocb *kiocb, struct socket *sock,
                dst_group = ffs(addr->nl_groups);
                err =  -EPERM;
                if ((dst_group || dst_portid) &&
-                   !netlink_capable(sock, NL_CFG_F_NONROOT_SEND))
+                   !netlink_allowed(sock, NL_CFG_F_NONROOT_SEND))
                        goto out;
        } else {
                dst_portid = nlk->dst_portid;
index ed13a790b00e1684215e04c6a68f71f9491cb3c9..0b59d441f5b6bcff8d03b08211e5a901c170dfe8 100644 (file)
@@ -38,7 +38,8 @@ struct netlink_sock {
        struct mutex            *cb_mutex;
        struct mutex            cb_def_mutex;
        void                    (*netlink_rcv)(struct sk_buff *skb);
-       void                    (*netlink_bind)(int group);
+       int                     (*netlink_bind)(int group);
+       void                    (*netlink_unbind)(int group);
        struct module           *module;
 #ifdef CONFIG_NETLINK_MMAP
        struct mutex            pg_vec_lock;
@@ -74,7 +75,8 @@ struct netlink_table {
        unsigned int            groups;
        struct mutex            *cb_mutex;
        struct module           *module;
-       void                    (*bind)(int group);
+       int                     (*bind)(int group);
+       void                    (*unbind)(int group);
        bool                    (*compare)(struct net *net, struct sock *sock);
        int                     registered;
 };
index b1dcdb932a86ee919642f47f58dd3b716995ad76..a3ba3ca0ff9281dec15c0b4d42002394583c1d8d 100644 (file)
@@ -561,7 +561,7 @@ static int genl_family_rcv_msg(struct genl_family *family,
                return -EOPNOTSUPP;
 
        if ((ops->flags & GENL_ADMIN_PERM) &&
-           !capable(CAP_NET_ADMIN))
+           !netlink_capable(skb, CAP_NET_ADMIN))
                return -EPERM;
 
        if ((nlh->nlmsg_flags & NLM_F_DUMP) == NLM_F_DUMP) {
index 2c77e7b1a913241d85f11d19c25b5e182b5757f1..c36856a457ca963c735e89e36478a53ba60bb453 100644 (file)
@@ -134,8 +134,8 @@ static int set_eth_addr(struct sk_buff *skb,
 
        skb_postpull_rcsum(skb, eth_hdr(skb), ETH_ALEN * 2);
 
-       memcpy(eth_hdr(skb)->h_source, eth_key->eth_src, ETH_ALEN);
-       memcpy(eth_hdr(skb)->h_dest, eth_key->eth_dst, ETH_ALEN);
+       ether_addr_copy(eth_hdr(skb)->h_source, eth_key->eth_src);
+       ether_addr_copy(eth_hdr(skb)->h_dest, eth_key->eth_dst);
 
        ovs_skb_postpush_rcsum(skb, eth_hdr(skb), ETH_ALEN * 2);
 
index a3276e3c4feb065278b5195652378157619ff94c..8867d7e2d65bfe41c0103fa92b024edee7fd07e1 100644 (file)
@@ -524,7 +524,7 @@ static int ovs_packet_cmd_execute(struct sk_buff *skb, struct genl_info *info)
                packet->protocol = htons(ETH_P_802_2);
 
        /* Build an sw_flow for sending this packet. */
-       flow = ovs_flow_alloc(false);
+       flow = ovs_flow_alloc();
        err = PTR_ERR(flow);
        if (IS_ERR(flow))
                goto err_kfree_skb;
@@ -782,7 +782,6 @@ static int ovs_flow_cmd_new_or_set(struct sk_buff *skb, struct genl_info *info)
        struct datapath *dp;
        struct sw_flow_actions *acts = NULL;
        struct sw_flow_match match;
-       bool exact_5tuple;
        int error;
 
        /* Extract key. */
@@ -791,7 +790,7 @@ static int ovs_flow_cmd_new_or_set(struct sk_buff *skb, struct genl_info *info)
                goto error;
 
        ovs_match_init(&match, &key, &mask);
-       error = ovs_nla_get_match(&match, &exact_5tuple,
+       error = ovs_nla_get_match(&match,
                                  a[OVS_FLOW_ATTR_KEY], a[OVS_FLOW_ATTR_MASK]);
        if (error)
                goto error;
@@ -830,7 +829,7 @@ static int ovs_flow_cmd_new_or_set(struct sk_buff *skb, struct genl_info *info)
                        goto err_unlock_ovs;
 
                /* Allocate flow. */
-               flow = ovs_flow_alloc(!exact_5tuple);
+               flow = ovs_flow_alloc();
                if (IS_ERR(flow)) {
                        error = PTR_ERR(flow);
                        goto err_unlock_ovs;
@@ -914,7 +913,7 @@ static int ovs_flow_cmd_get(struct sk_buff *skb, struct genl_info *info)
        }
 
        ovs_match_init(&match, &key, NULL);
-       err = ovs_nla_get_match(&match, NULL, a[OVS_FLOW_ATTR_KEY], NULL);
+       err = ovs_nla_get_match(&match, a[OVS_FLOW_ATTR_KEY], NULL);
        if (err)
                return err;
 
@@ -968,7 +967,7 @@ static int ovs_flow_cmd_del(struct sk_buff *skb, struct genl_info *info)
        }
 
        ovs_match_init(&match, &key, NULL);
-       err = ovs_nla_get_match(&match, NULL, a[OVS_FLOW_ATTR_KEY], NULL);
+       err = ovs_nla_get_match(&match, a[OVS_FLOW_ATTR_KEY], NULL);
        if (err)
                goto unlock;
 
index 05317380fc03a03af708716f162738727cab5fff..7ede507500d7daa1cca3d352f9923122f5e63709 100644 (file)
@@ -194,7 +194,9 @@ struct sk_buff *ovs_vport_cmd_build_info(struct vport *, u32 pid, u32 seq,
 int ovs_execute_actions(struct datapath *dp, struct sk_buff *skb);
 void ovs_dp_notify_wq(struct work_struct *work);
 
-#define OVS_NLERR(fmt, ...) \
-       pr_info_once("netlink: " fmt, ##__VA_ARGS__)
-
+#define OVS_NLERR(fmt, ...)                                    \
+do {                                                           \
+       if (net_ratelimit())                                    \
+               pr_info("netlink: " fmt, ##__VA_ARGS__);        \
+} while (0)
 #endif /* datapath.h */
index 2998989e76db0a7ccb8e25ef11aa393180956593..e0fc12bbeeb165e89ce1a844a9df0a75910bcd12 100644 (file)
@@ -65,87 +65,112 @@ void ovs_flow_stats_update(struct sw_flow *flow, struct sk_buff *skb)
 {
        struct flow_stats *stats;
        __be16 tcp_flags = 0;
+       int node = numa_node_id();
 
-       if (!flow->stats.is_percpu)
-               stats = flow->stats.stat;
-       else
-               stats = this_cpu_ptr(flow->stats.cpu_stats);
-
-       if ((flow->key.eth.type == htons(ETH_P_IP) ||
-            flow->key.eth.type == htons(ETH_P_IPV6)) &&
-           flow->key.ip.frag != OVS_FRAG_TYPE_LATER &&
-           flow->key.ip.proto == IPPROTO_TCP &&
-           likely(skb->len >= skb_transport_offset(skb) + sizeof(struct tcphdr))) {
-               tcp_flags = TCP_FLAGS_BE16(tcp_hdr(skb));
+       stats = rcu_dereference(flow->stats[node]);
+
+       if (likely(flow->key.ip.proto == IPPROTO_TCP)) {
+               if (likely(flow->key.eth.type == htons(ETH_P_IP)))
+                       tcp_flags = flow->key.ipv4.tp.flags;
+               else if (likely(flow->key.eth.type == htons(ETH_P_IPV6)))
+                       tcp_flags = flow->key.ipv6.tp.flags;
+       }
+       /* Check if already have node-specific stats. */
+       if (likely(stats)) {
+               spin_lock(&stats->lock);
+               /* Mark if we write on the pre-allocated stats. */
+               if (node == 0 && unlikely(flow->stats_last_writer != node))
+                       flow->stats_last_writer = node;
+       } else {
+               stats = rcu_dereference(flow->stats[0]); /* Pre-allocated. */
+               spin_lock(&stats->lock);
+
+               /* If the current NUMA-node is the only writer on the
+                * pre-allocated stats keep using them.
+                */
+               if (unlikely(flow->stats_last_writer != node)) {
+                       /* A previous locker may have already allocated the
+                        * stats, so we need to check again.  If node-specific
+                        * stats were already allocated, we update the pre-
+                        * allocated stats as we have already locked them.
+                        */
+                       if (likely(flow->stats_last_writer != NUMA_NO_NODE)
+                           && likely(!rcu_dereference(flow->stats[node]))) {
+                               /* Try to allocate node-specific stats. */
+                               struct flow_stats *new_stats;
+
+                               new_stats =
+                                       kmem_cache_alloc_node(flow_stats_cache,
+                                                             GFP_THISNODE |
+                                                             __GFP_NOMEMALLOC,
+                                                             node);
+                               if (likely(new_stats)) {
+                                       new_stats->used = jiffies;
+                                       new_stats->packet_count = 1;
+                                       new_stats->byte_count = skb->len;
+                                       new_stats->tcp_flags = tcp_flags;
+                                       spin_lock_init(&new_stats->lock);
+
+                                       rcu_assign_pointer(flow->stats[node],
+                                                          new_stats);
+                                       goto unlock;
+                               }
+                       }
+                       flow->stats_last_writer = node;
+               }
        }
 
-       spin_lock(&stats->lock);
        stats->used = jiffies;
        stats->packet_count++;
        stats->byte_count += skb->len;
        stats->tcp_flags |= tcp_flags;
-       spin_unlock(&stats->lock);
-}
-
-static void stats_read(struct flow_stats *stats,
-                      struct ovs_flow_stats *ovs_stats,
-                      unsigned long *used, __be16 *tcp_flags)
-{
-       spin_lock(&stats->lock);
-       if (!*used || time_after(stats->used, *used))
-               *used = stats->used;
-       *tcp_flags |= stats->tcp_flags;
-       ovs_stats->n_packets += stats->packet_count;
-       ovs_stats->n_bytes += stats->byte_count;
+unlock:
        spin_unlock(&stats->lock);
 }
 
 void ovs_flow_stats_get(struct sw_flow *flow, struct ovs_flow_stats *ovs_stats,
                        unsigned long *used, __be16 *tcp_flags)
 {
-       int cpu;
+       int node;
 
        *used = 0;
        *tcp_flags = 0;
        memset(ovs_stats, 0, sizeof(*ovs_stats));
 
-       local_bh_disable();
-       if (!flow->stats.is_percpu) {
-               stats_read(flow->stats.stat, ovs_stats, used, tcp_flags);
-       } else {
-               for_each_possible_cpu(cpu) {
-                       struct flow_stats *stats;
+       for_each_node(node) {
+               struct flow_stats *stats = rcu_dereference(flow->stats[node]);
 
-                       stats = per_cpu_ptr(flow->stats.cpu_stats, cpu);
-                       stats_read(stats, ovs_stats, used, tcp_flags);
+               if (stats) {
+                       /* Local CPU may write on non-local stats, so we must
+                        * block bottom-halves here.
+                        */
+                       spin_lock_bh(&stats->lock);
+                       if (!*used || time_after(stats->used, *used))
+                               *used = stats->used;
+                       *tcp_flags |= stats->tcp_flags;
+                       ovs_stats->n_packets += stats->packet_count;
+                       ovs_stats->n_bytes += stats->byte_count;
+                       spin_unlock_bh(&stats->lock);
                }
        }
-       local_bh_enable();
-}
-
-static void stats_reset(struct flow_stats *stats)
-{
-       spin_lock(&stats->lock);
-       stats->used = 0;
-       stats->packet_count = 0;
-       stats->byte_count = 0;
-       stats->tcp_flags = 0;
-       spin_unlock(&stats->lock);
 }
 
 void ovs_flow_stats_clear(struct sw_flow *flow)
 {
-       int cpu;
-
-       local_bh_disable();
-       if (!flow->stats.is_percpu) {
-               stats_reset(flow->stats.stat);
-       } else {
-               for_each_possible_cpu(cpu) {
-                       stats_reset(per_cpu_ptr(flow->stats.cpu_stats, cpu));
+       int node;
+
+       for_each_node(node) {
+               struct flow_stats *stats = rcu_dereference(flow->stats[node]);
+
+               if (stats) {
+                       spin_lock_bh(&stats->lock);
+                       stats->used = 0;
+                       stats->packet_count = 0;
+                       stats->byte_count = 0;
+                       stats->tcp_flags = 0;
+                       spin_unlock_bh(&stats->lock);
                }
        }
-       local_bh_enable();
 }
 
 static int check_header(struct sk_buff *skb, int len)
@@ -372,14 +397,14 @@ static int parse_icmpv6(struct sk_buff *skb, struct sw_flow_key *key,
                            && opt_len == 8) {
                                if (unlikely(!is_zero_ether_addr(key->ipv6.nd.sll)))
                                        goto invalid;
-                               memcpy(key->ipv6.nd.sll,
-                                   &nd->opt[offset+sizeof(*nd_opt)], ETH_ALEN);
+                               ether_addr_copy(key->ipv6.nd.sll,
+                                               &nd->opt[offset+sizeof(*nd_opt)]);
                        } else if (nd_opt->nd_opt_type == ND_OPT_TARGET_LL_ADDR
                                   && opt_len == 8) {
                                if (unlikely(!is_zero_ether_addr(key->ipv6.nd.tll)))
                                        goto invalid;
-                               memcpy(key->ipv6.nd.tll,
-                                   &nd->opt[offset+sizeof(*nd_opt)], ETH_ALEN);
+                               ether_addr_copy(key->ipv6.nd.tll,
+                                               &nd->opt[offset+sizeof(*nd_opt)]);
                        }
 
                        icmp_len -= opt_len;
@@ -439,8 +464,8 @@ int ovs_flow_extract(struct sk_buff *skb, u16 in_port, struct sw_flow_key *key)
         * header in the linear data area.
         */
        eth = eth_hdr(skb);
-       memcpy(key->eth.src, eth->h_source, ETH_ALEN);
-       memcpy(key->eth.dst, eth->h_dest, ETH_ALEN);
+       ether_addr_copy(key->eth.src, eth->h_source);
+       ether_addr_copy(key->eth.dst, eth->h_dest);
 
        __skb_pull(skb, 2 * ETH_ALEN);
        /* We are going to push all headers that we pull, so no need to
@@ -538,8 +563,8 @@ int ovs_flow_extract(struct sk_buff *skb, u16 in_port, struct sw_flow_key *key)
                                key->ip.proto = ntohs(arp->ar_op);
                        memcpy(&key->ipv4.addr.src, arp->ar_sip, sizeof(key->ipv4.addr.src));
                        memcpy(&key->ipv4.addr.dst, arp->ar_tip, sizeof(key->ipv4.addr.dst));
-                       memcpy(key->ipv4.arp.sha, arp->ar_sha, ETH_ALEN);
-                       memcpy(key->ipv4.arp.tha, arp->ar_tha, ETH_ALEN);
+                       ether_addr_copy(key->ipv4.arp.sha, arp->ar_sha);
+                       ether_addr_copy(key->ipv4.arp.tha, arp->ar_tha);
                }
        } else if (key->eth.type == htons(ETH_P_IPV6)) {
                int nh_len;             /* IPv6 Header + Extensions */
index 2d770e28a3a396f7da0a4c8e7baf4b29709ceb0c..ddcebc53224f0d1c91ad0c7a8158db8236aafd41 100644 (file)
@@ -155,24 +155,22 @@ struct flow_stats {
        __be16 tcp_flags;               /* Union of seen TCP flags. */
 };
 
-struct sw_flow_stats {
-       bool is_percpu;
-       union {
-               struct flow_stats *stat;
-               struct flow_stats __percpu *cpu_stats;
-       };
-};
-
 struct sw_flow {
        struct rcu_head rcu;
        struct hlist_node hash_node[2];
        u32 hash;
-
+       int stats_last_writer;          /* NUMA-node id of the last writer on
+                                        * 'stats[0]'.
+                                        */
        struct sw_flow_key key;
        struct sw_flow_key unmasked_key;
        struct sw_flow_mask *mask;
        struct sw_flow_actions __rcu *sf_acts;
-       struct sw_flow_stats stats;
+       struct flow_stats __rcu *stats[]; /* One for each NUMA node.  First one
+                                          * is allocated at flow creation time,
+                                          * the rest are allocated on demand
+                                          * while holding the 'stats[0].lock'.
+                                          */
 };
 
 struct arp_eth_header {
index 4d000acaed0db5cc2052ae55f5b0cb7be6472b79..32a725cfeb0e83d06b812a13ae3f827a0d65b36c 100644 (file)
@@ -16,6 +16,8 @@
  * 02110-1301, USA
  */
 
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
 #include "flow.h"
 #include "datapath.h"
 #include <linux/uaccess.h>
@@ -216,14 +218,14 @@ static bool match_validate(const struct sw_flow_match *match,
        if ((key_attrs & key_expected) != key_expected) {
                /* Key attributes check failed. */
                OVS_NLERR("Missing expected key attributes (key_attrs=%llx, expected=%llx).\n",
-                               key_attrs, key_expected);
+                               (unsigned long long)key_attrs, (unsigned long long)key_expected);
                return false;
        }
 
        if ((mask_attrs & mask_allowed) != mask_attrs) {
                /* Mask attributes check failed. */
                OVS_NLERR("Contain more than allowed mask fields (mask_attrs=%llx, mask_allowed=%llx).\n",
-                               mask_attrs, mask_allowed);
+                               (unsigned long long)mask_attrs, (unsigned long long)mask_allowed);
                return false;
        }
 
@@ -266,20 +268,6 @@ static bool is_all_zero(const u8 *fp, size_t size)
        return true;
 }
 
-static bool is_all_set(const u8 *fp, size_t size)
-{
-       int i;
-
-       if (!fp)
-               return false;
-
-       for (i = 0; i < size; i++)
-               if (fp[i] != 0xff)
-                       return false;
-
-       return true;
-}
-
 static int __parse_flow_nlattrs(const struct nlattr *attr,
                                const struct nlattr *a[],
                                u64 *attrsp, bool nz)
@@ -501,9 +489,8 @@ static int metadata_from_nlattrs(struct sw_flow_match *match,  u64 *attrs,
        return 0;
 }
 
-static int ovs_key_from_nlattrs(struct sw_flow_match *match,  bool *exact_5tuple,
-                               u64 attrs, const struct nlattr **a,
-                               bool is_mask)
+static int ovs_key_from_nlattrs(struct sw_flow_match *match, u64 attrs,
+                               const struct nlattr **a, bool is_mask)
 {
        int err;
        u64 orig_attrs = attrs;
@@ -560,11 +547,6 @@ static int ovs_key_from_nlattrs(struct sw_flow_match *match,  bool *exact_5tuple
                SW_FLOW_KEY_PUT(match, eth.type, htons(ETH_P_802_2), is_mask);
        }
 
-       if (is_mask && exact_5tuple) {
-               if (match->mask->key.eth.type != htons(0xffff))
-                       *exact_5tuple = false;
-       }
-
        if (attrs & (1 << OVS_KEY_ATTR_IPV4)) {
                const struct ovs_key_ipv4 *ipv4_key;
 
@@ -587,13 +569,6 @@ static int ovs_key_from_nlattrs(struct sw_flow_match *match,  bool *exact_5tuple
                SW_FLOW_KEY_PUT(match, ipv4.addr.dst,
                                ipv4_key->ipv4_dst, is_mask);
                attrs &= ~(1 << OVS_KEY_ATTR_IPV4);
-
-               if (is_mask && exact_5tuple && *exact_5tuple) {
-                       if (ipv4_key->ipv4_proto != 0xff ||
-                           ipv4_key->ipv4_src != htonl(0xffffffff) ||
-                           ipv4_key->ipv4_dst != htonl(0xffffffff))
-                               *exact_5tuple = false;
-               }
        }
 
        if (attrs & (1 << OVS_KEY_ATTR_IPV6)) {
@@ -625,13 +600,6 @@ static int ovs_key_from_nlattrs(struct sw_flow_match *match,  bool *exact_5tuple
                                is_mask);
 
                attrs &= ~(1 << OVS_KEY_ATTR_IPV6);
-
-               if (is_mask && exact_5tuple && *exact_5tuple) {
-                       if (ipv6_key->ipv6_proto != 0xff ||
-                           !is_all_set((u8 *)ipv6_key->ipv6_src, sizeof(match->key->ipv6.addr.src)) ||
-                           !is_all_set((u8 *)ipv6_key->ipv6_dst, sizeof(match->key->ipv6.addr.dst)))
-                               *exact_5tuple = false;
-               }
        }
 
        if (attrs & (1 << OVS_KEY_ATTR_ARP)) {
@@ -674,11 +642,6 @@ static int ovs_key_from_nlattrs(struct sw_flow_match *match,  bool *exact_5tuple
                                        tcp_key->tcp_dst, is_mask);
                }
                attrs &= ~(1 << OVS_KEY_ATTR_TCP);
-
-               if (is_mask && exact_5tuple && *exact_5tuple &&
-                   (tcp_key->tcp_src != htons(0xffff) ||
-                    tcp_key->tcp_dst != htons(0xffff)))
-                       *exact_5tuple = false;
        }
 
        if (attrs & (1 << OVS_KEY_ATTR_TCP_FLAGS)) {
@@ -710,11 +673,6 @@ static int ovs_key_from_nlattrs(struct sw_flow_match *match,  bool *exact_5tuple
                                        udp_key->udp_dst, is_mask);
                }
                attrs &= ~(1 << OVS_KEY_ATTR_UDP);
-
-               if (is_mask && exact_5tuple && *exact_5tuple &&
-                   (udp_key->udp_src != htons(0xffff) ||
-                    udp_key->udp_dst != htons(0xffff)))
-                       *exact_5tuple = false;
        }
 
        if (attrs & (1 << OVS_KEY_ATTR_SCTP)) {
@@ -800,7 +758,6 @@ static void sw_flow_mask_set(struct sw_flow_mask *mask,
  * attribute specifies the mask field of the wildcarded flow.
  */
 int ovs_nla_get_match(struct sw_flow_match *match,
-                     bool *exact_5tuple,
                      const struct nlattr *key,
                      const struct nlattr *mask)
 {
@@ -848,13 +805,10 @@ int ovs_nla_get_match(struct sw_flow_match *match,
                }
        }
 
-       err = ovs_key_from_nlattrs(match, NULL, key_attrs, a, false);
+       err = ovs_key_from_nlattrs(match, key_attrs, a, false);
        if (err)
                return err;
 
-       if (exact_5tuple)
-               *exact_5tuple = true;
-
        if (mask) {
                err = parse_flow_mask_nlattrs(mask, a, &mask_attrs);
                if (err)
@@ -892,7 +846,7 @@ int ovs_nla_get_match(struct sw_flow_match *match,
                        }
                }
 
-               err = ovs_key_from_nlattrs(match, exact_5tuple, mask_attrs, a, true);
+               err = ovs_key_from_nlattrs(match, mask_attrs, a, true);
                if (err)
                        return err;
        } else {
@@ -982,8 +936,8 @@ int ovs_nla_put_flow(const struct sw_flow_key *swkey,
                goto nla_put_failure;
 
        eth_key = nla_data(nla);
-       memcpy(eth_key->eth_src, output->eth.src, ETH_ALEN);
-       memcpy(eth_key->eth_dst, output->eth.dst, ETH_ALEN);
+       ether_addr_copy(eth_key->eth_src, output->eth.src);
+       ether_addr_copy(eth_key->eth_dst, output->eth.dst);
 
        if (swkey->eth.tci || swkey->eth.type == htons(ETH_P_8021Q)) {
                __be16 eth_type;
@@ -1055,8 +1009,8 @@ int ovs_nla_put_flow(const struct sw_flow_key *swkey,
                arp_key->arp_sip = output->ipv4.addr.src;
                arp_key->arp_tip = output->ipv4.addr.dst;
                arp_key->arp_op = htons(output->ip.proto);
-               memcpy(arp_key->arp_sha, output->ipv4.arp.sha, ETH_ALEN);
-               memcpy(arp_key->arp_tha, output->ipv4.arp.tha, ETH_ALEN);
+               ether_addr_copy(arp_key->arp_sha, output->ipv4.arp.sha);
+               ether_addr_copy(arp_key->arp_tha, output->ipv4.arp.tha);
        }
 
        if ((swkey->eth.type == htons(ETH_P_IP) ||
@@ -1105,11 +1059,11 @@ int ovs_nla_put_flow(const struct sw_flow_key *swkey,
                                goto nla_put_failure;
                        sctp_key = nla_data(nla);
                        if (swkey->eth.type == htons(ETH_P_IP)) {
-                               sctp_key->sctp_src = swkey->ipv4.tp.src;
-                               sctp_key->sctp_dst = swkey->ipv4.tp.dst;
+                               sctp_key->sctp_src = output->ipv4.tp.src;
+                               sctp_key->sctp_dst = output->ipv4.tp.dst;
                        } else if (swkey->eth.type == htons(ETH_P_IPV6)) {
-                               sctp_key->sctp_src = swkey->ipv6.tp.src;
-                               sctp_key->sctp_dst = swkey->ipv6.tp.dst;
+                               sctp_key->sctp_src = output->ipv6.tp.src;
+                               sctp_key->sctp_dst = output->ipv6.tp.dst;
                        }
                } else if (swkey->eth.type == htons(ETH_P_IP) &&
                           swkey->ip.proto == IPPROTO_ICMP) {
@@ -1143,8 +1097,8 @@ int ovs_nla_put_flow(const struct sw_flow_key *swkey,
                                nd_key = nla_data(nla);
                                memcpy(nd_key->nd_target, &output->ipv6.nd.target,
                                                        sizeof(nd_key->nd_target));
-                               memcpy(nd_key->nd_sll, output->ipv6.nd.sll, ETH_ALEN);
-                               memcpy(nd_key->nd_tll, output->ipv6.nd.tll, ETH_ALEN);
+                               ether_addr_copy(nd_key->nd_sll, output->ipv6.nd.sll);
+                               ether_addr_copy(nd_key->nd_tll, output->ipv6.nd.tll);
                        }
                }
        }
index b31fbe28bc7a81a0640546b6e55be8841c32f8a5..440151045d3946329bf01e4fd5a1c81f0fd4e906 100644 (file)
@@ -45,7 +45,6 @@ int ovs_nla_put_flow(const struct sw_flow_key *,
 int ovs_nla_get_flow_metadata(struct sw_flow *flow,
                              const struct nlattr *attr);
 int ovs_nla_get_match(struct sw_flow_match *match,
-                     bool *exact_5tuple,
                      const struct nlattr *,
                      const struct nlattr *);
 
index 3c268b3d71c34baa0a8c70888823b37da454fffd..d8ef37b937bda884f00ffe7c53b8d95446c5f0f0 100644 (file)
@@ -48,6 +48,7 @@
 #define REHASH_INTERVAL                (10 * 60 * HZ)
 
 static struct kmem_cache *flow_cache;
+struct kmem_cache *flow_stats_cache __read_mostly;
 
 static u16 range_n_bytes(const struct sw_flow_key_range *range)
 {
@@ -57,8 +58,10 @@ static u16 range_n_bytes(const struct sw_flow_key_range *range)
 void ovs_flow_mask_key(struct sw_flow_key *dst, const struct sw_flow_key *src,
                       const struct sw_flow_mask *mask)
 {
-       const long *m = (long *)((u8 *)&mask->key + mask->range.start);
-       const long *s = (long *)((u8 *)src + mask->range.start);
+       const long *m = (const long *)((const u8 *)&mask->key +
+                               mask->range.start);
+       const long *s = (const long *)((const u8 *)src +
+                               mask->range.start);
        long *d = (long *)((u8 *)dst + mask->range.start);
        int i;
 
@@ -70,10 +73,11 @@ void ovs_flow_mask_key(struct sw_flow_key *dst, const struct sw_flow_key *src,
                *d++ = *s++ & *m++;
 }
 
-struct sw_flow *ovs_flow_alloc(bool percpu_stats)
+struct sw_flow *ovs_flow_alloc(void)
 {
        struct sw_flow *flow;
-       int cpu;
+       struct flow_stats *stats;
+       int node;
 
        flow = kmem_cache_alloc(flow_cache, GFP_KERNEL);
        if (!flow)
@@ -81,27 +85,22 @@ struct sw_flow *ovs_flow_alloc(bool percpu_stats)
 
        flow->sf_acts = NULL;
        flow->mask = NULL;
+       flow->stats_last_writer = NUMA_NO_NODE;
 
-       flow->stats.is_percpu = percpu_stats;
+       /* Initialize the default stat node. */
+       stats = kmem_cache_alloc_node(flow_stats_cache,
+                                     GFP_KERNEL | __GFP_ZERO, 0);
+       if (!stats)
+               goto err;
 
-       if (!percpu_stats) {
-               flow->stats.stat = kzalloc(sizeof(*flow->stats.stat), GFP_KERNEL);
-               if (!flow->stats.stat)
-                       goto err;
+       spin_lock_init(&stats->lock);
 
-               spin_lock_init(&flow->stats.stat->lock);
-       } else {
-               flow->stats.cpu_stats = alloc_percpu(struct flow_stats);
-               if (!flow->stats.cpu_stats)
-                       goto err;
+       RCU_INIT_POINTER(flow->stats[0], stats);
 
-               for_each_possible_cpu(cpu) {
-                       struct flow_stats *cpu_stats;
+       for_each_node(node)
+               if (node != 0)
+                       RCU_INIT_POINTER(flow->stats[node], NULL);
 
-                       cpu_stats = per_cpu_ptr(flow->stats.cpu_stats, cpu);
-                       spin_lock_init(&cpu_stats->lock);
-               }
-       }
        return flow;
 err:
        kmem_cache_free(flow_cache, flow);
@@ -138,11 +137,13 @@ static struct flex_array *alloc_buckets(unsigned int n_buckets)
 
 static void flow_free(struct sw_flow *flow)
 {
+       int node;
+
        kfree((struct sf_flow_acts __force *)flow->sf_acts);
-       if (flow->stats.is_percpu)
-               free_percpu(flow->stats.cpu_stats);
-       else
-               kfree(flow->stats.stat);
+       for_each_node(node)
+               if (flow->stats[node])
+                       kmem_cache_free(flow_stats_cache,
+                                       (struct flow_stats __force *)flow->stats[node]);
        kmem_cache_free(flow_cache, flow);
 }
 
@@ -375,7 +376,7 @@ int ovs_flow_tbl_flush(struct flow_table *flow_table)
 static u32 flow_hash(const struct sw_flow_key *key, int key_start,
                     int key_end)
 {
-       u32 *hash_key = (u32 *)((u8 *)key + key_start);
+       const u32 *hash_key = (const u32 *)((const u8 *)key + key_start);
        int hash_u32s = (key_end - key_start) >> 2;
 
        /* Make sure number of hash bytes are multiple of u32. */
@@ -397,8 +398,8 @@ static bool cmp_key(const struct sw_flow_key *key1,
                    const struct sw_flow_key *key2,
                    int key_start, int key_end)
 {
-       const long *cp1 = (long *)((u8 *)key1 + key_start);
-       const long *cp2 = (long *)((u8 *)key2 + key_start);
+       const long *cp1 = (const long *)((const u8 *)key1 + key_start);
+       const long *cp2 = (const long *)((const u8 *)key2 + key_start);
        long diffs = 0;
        int i;
 
@@ -513,8 +514,8 @@ static struct sw_flow_mask *mask_alloc(void)
 static bool mask_equal(const struct sw_flow_mask *a,
                       const struct sw_flow_mask *b)
 {
-       u8 *a_ = (u8 *)&a->key + a->range.start;
-       u8 *b_ = (u8 *)&b->key + b->range.start;
+       const u8 *a_ = (const u8 *)&a->key + a->range.start;
+       const u8 *b_ = (const u8 *)&b->key + b->range.start;
 
        return  (a->range.end == b->range.end)
                && (a->range.start == b->range.start)
@@ -597,16 +598,28 @@ int ovs_flow_init(void)
        BUILD_BUG_ON(__alignof__(struct sw_flow_key) % __alignof__(long));
        BUILD_BUG_ON(sizeof(struct sw_flow_key) % sizeof(long));
 
-       flow_cache = kmem_cache_create("sw_flow", sizeof(struct sw_flow), 0,
-                                       0, NULL);
+       flow_cache = kmem_cache_create("sw_flow", sizeof(struct sw_flow)
+                                      + (num_possible_nodes()
+                                         * sizeof(struct flow_stats *)),
+                                      0, 0, NULL);
        if (flow_cache == NULL)
                return -ENOMEM;
 
+       flow_stats_cache
+               = kmem_cache_create("sw_flow_stats", sizeof(struct flow_stats),
+                                   0, SLAB_HWCACHE_ALIGN, NULL);
+       if (flow_stats_cache == NULL) {
+               kmem_cache_destroy(flow_cache);
+               flow_cache = NULL;
+               return -ENOMEM;
+       }
+
        return 0;
 }
 
 /* Uninitializes the flow module. */
 void ovs_flow_exit(void)
 {
+       kmem_cache_destroy(flow_stats_cache);
        kmem_cache_destroy(flow_cache);
 }
index baaeb101924d81a4beb373be93e07287ed9be020..ca8a5820f6153f67fb9ad987c4c156be882c92ea 100644 (file)
@@ -52,10 +52,12 @@ struct flow_table {
        unsigned int count;
 };
 
+extern struct kmem_cache *flow_stats_cache;
+
 int ovs_flow_init(void);
 void ovs_flow_exit(void);
 
-struct sw_flow *ovs_flow_alloc(bool percpu_stats);
+struct sw_flow *ovs_flow_alloc(void);
 void ovs_flow_free(struct sw_flow *, bool deferred);
 
 int ovs_flow_tbl_init(struct flow_table *);
index ebb6e2442554c89fa2b02f1f8f06b0a3b8acf39e..35ec4fed09e228c7e6fe889d2701cb3a3de7748a 100644 (file)
@@ -172,7 +172,7 @@ static int gre_tnl_send(struct vport *vport, struct sk_buff *skb)
        df = OVS_CB(skb)->tun_key->tun_flags & TUNNEL_DONT_FRAGMENT ?
                htons(IP_DF) : 0;
 
-       skb->local_df = 1;
+       skb->ignore_df = 1;
 
        return iptunnel_xmit(skb->sk, rt, skb, fl.saddr,
                             OVS_CB(skb)->tun_key->ipv4_dst, IPPROTO_GRE,
@@ -256,7 +256,7 @@ static void gre_tnl_destroy(struct vport *vport)
 
        ovs_net = net_generic(net, ovs_net_id);
 
-       rcu_assign_pointer(ovs_net->vport_net.gre_vport, NULL);
+       RCU_INIT_POINTER(ovs_net->vport_net.gre_vport, NULL);
        ovs_vport_deferred_free(vport);
        gre_exit();
 }
index 729c68763fe70d150793e1f01a023d4d8f1b78c0..789af9280e77264b4d7f65ddb6c333e96fa4147f 100644 (file)
@@ -130,7 +130,7 @@ static void do_setup(struct net_device *netdev)
        netdev->priv_flags &= ~IFF_TX_SKB_SHARING;
        netdev->priv_flags |= IFF_LIVE_ADDR_CHANGE;
        netdev->destructor = internal_dev_destructor;
-       SET_ETHTOOL_OPS(netdev, &internal_dev_ethtool_ops);
+       netdev->ethtool_ops = &internal_dev_ethtool_ops;
        netdev->tx_queue_len = 0;
 
        netdev->features = NETIF_F_LLTX | NETIF_F_SG | NETIF_F_FRAGLIST |
index e797a50ac2beec3a019bacf15e2e7c593355a46f..a93efa3f64c3eee0c9e575e619e056b276ba5754 100644 (file)
@@ -170,7 +170,7 @@ static int vxlan_tnl_send(struct vport *vport, struct sk_buff *skb)
        df = OVS_CB(skb)->tun_key->tun_flags & TUNNEL_DONT_FRAGMENT ?
                htons(IP_DF) : 0;
 
-       skb->local_df = 1;
+       skb->ignore_df = 1;
 
        inet_get_local_port_range(net, &port_min, &port_max);
        src_port = vxlan_src_port(port_min, port_max, skb);
@@ -180,7 +180,8 @@ static int vxlan_tnl_send(struct vport *vport, struct sk_buff *skb)
                             OVS_CB(skb)->tun_key->ipv4_tos,
                             OVS_CB(skb)->tun_key->ipv4_ttl, df,
                             src_port, dst_port,
-                            htonl(be64_to_cpu(OVS_CB(skb)->tun_key->tun_id) << 8));
+                            htonl(be64_to_cpu(OVS_CB(skb)->tun_key->tun_id) << 8),
+                            false);
        if (err < 0)
                ip_rt_put(rt);
 error:
index d7e50a17396c5563c778ca9e0e07f9c9a730b56a..8d721e62f388d9990b7850942196440550a2a6bb 100644 (file)
@@ -172,7 +172,7 @@ void ovs_vport_deferred_free(struct vport *vport);
  */
 static inline void *vport_priv(const struct vport *vport)
 {
-       return (u8 *)vport + ALIGN(sizeof(struct vport), VPORT_ALIGN);
+       return (u8 *)(uintptr_t)vport + ALIGN(sizeof(struct vport), VPORT_ALIGN);
 }
 
 /**
@@ -185,9 +185,9 @@ static inline void *vport_priv(const struct vport *vport)
  * the result of a hash table lookup.  @priv must point to the start of the
  * private data area.
  */
-static inline struct vport *vport_from_priv(const void *priv)
+static inline struct vport *vport_from_priv(void *priv)
 {
-       return (struct vport *)(priv - ALIGN(sizeof(struct vport), VPORT_ALIGN));
+       return (struct vport *)((u8 *)priv - ALIGN(sizeof(struct vport), VPORT_ALIGN));
 }
 
 void ovs_vport_receive(struct vport *, struct sk_buff *,
index 533ce4ff108ad94ff0a1e5205bc17f9c91c0b3ce..92f2c7107eec4f307cc50cdfedfb4ea2db0e59de 100644 (file)
@@ -128,6 +128,7 @@ static int pdiag_put_fanout(struct packet_sock *po, struct sk_buff *nlskb)
 
 static int sk_diag_fill(struct sock *sk, struct sk_buff *skb,
                        struct packet_diag_req *req,
+                       bool may_report_filterinfo,
                        struct user_namespace *user_ns,
                        u32 portid, u32 seq, u32 flags, int sk_ino)
 {
@@ -172,7 +173,8 @@ static int sk_diag_fill(struct sock *sk, struct sk_buff *skb,
                goto out_nlmsg_trim;
 
        if ((req->pdiag_show & PACKET_SHOW_FILTER) &&
-           sock_diag_put_filterinfo(user_ns, sk, skb, PACKET_DIAG_FILTER))
+           sock_diag_put_filterinfo(may_report_filterinfo, sk, skb,
+                                    PACKET_DIAG_FILTER))
                goto out_nlmsg_trim;
 
        return nlmsg_end(skb, nlh);
@@ -188,9 +190,11 @@ static int packet_diag_dump(struct sk_buff *skb, struct netlink_callback *cb)
        struct packet_diag_req *req;
        struct net *net;
        struct sock *sk;
+       bool may_report_filterinfo;
 
        net = sock_net(skb->sk);
        req = nlmsg_data(cb->nlh);
+       may_report_filterinfo = netlink_net_capable(cb->skb, CAP_NET_ADMIN);
 
        mutex_lock(&net->packet.sklist_lock);
        sk_for_each(sk, &net->packet.sklist) {
@@ -200,6 +204,7 @@ static int packet_diag_dump(struct sk_buff *skb, struct netlink_callback *cb)
                        goto next;
 
                if (sk_diag_fill(sk, skb, req,
+                                may_report_filterinfo,
                                 sk_user_ns(NETLINK_CB(cb->skb).sk),
                                 NETLINK_CB(cb->skb).portid,
                                 cb->nlh->nlmsg_seq, NLM_F_MULTI,
index dc15f430080831e74fade00799a661ab10cc6f84..b64151ade6b33a9cbacb0980d3ddbe03d8f7b4c8 100644 (file)
@@ -70,10 +70,10 @@ static int addr_doit(struct sk_buff *skb, struct nlmsghdr *nlh)
        int err;
        u8 pnaddr;
 
-       if (!capable(CAP_NET_ADMIN))
+       if (!netlink_capable(skb, CAP_NET_ADMIN))
                return -EPERM;
 
-       if (!capable(CAP_SYS_ADMIN))
+       if (!netlink_capable(skb, CAP_SYS_ADMIN))
                return -EPERM;
 
        ASSERT_RTNL();
@@ -233,10 +233,10 @@ static int route_doit(struct sk_buff *skb, struct nlmsghdr *nlh)
        int err;
        u8 dst;
 
-       if (!capable(CAP_NET_ADMIN))
+       if (!netlink_capable(skb, CAP_NET_ADMIN))
                return -EPERM;
 
-       if (!capable(CAP_SYS_ADMIN))
+       if (!netlink_capable(skb, CAP_SYS_ADMIN))
                return -EPERM;
 
        ASSERT_RTNL();
index 37be6e226d1b46fefe8e0cf554580b1faf19cfb8..1dde91e3dc7033c575dcfc041a23402f98e52239 100644 (file)
@@ -298,7 +298,7 @@ void rds_ib_send_cq_comp_handler(struct ib_cq *cq, void *context)
                rds_ib_stats_inc(s_ib_tx_cq_event);
 
                if (wc.wr_id == RDS_IB_ACK_WR_ID) {
-                       if (ic->i_ack_queued + HZ/2 < jiffies)
+                       if (time_after(jiffies, ic->i_ack_queued + HZ/2))
                                rds_ib_stats_inc(s_ib_tx_stalled);
                        rds_ib_ack_send_complete(ic);
                        continue;
@@ -315,7 +315,7 @@ void rds_ib_send_cq_comp_handler(struct ib_cq *cq, void *context)
 
                        rm = rds_ib_send_unmap_op(ic, send, wc.status);
 
-                       if (send->s_queued + HZ/2 < jiffies)
+                       if (time_after(jiffies, send->s_queued + HZ/2))
                                rds_ib_stats_inc(s_ib_tx_stalled);
 
                        if (send->s_op) {
index e40c3c5db2c41e543abed12149c791fa9edd2ec6..9105ea03aec5dc05bad0221eb00c4794eb6e463a 100644 (file)
@@ -232,7 +232,7 @@ void rds_iw_send_cq_comp_handler(struct ib_cq *cq, void *context)
                }
 
                if (wc.wr_id == RDS_IW_ACK_WR_ID) {
-                       if (ic->i_ack_queued + HZ/2 < jiffies)
+                       if (time_after(jiffies, ic->i_ack_queued + HZ/2))
                                rds_iw_stats_inc(s_iw_tx_stalled);
                        rds_iw_ack_send_complete(ic);
                        continue;
@@ -267,7 +267,7 @@ void rds_iw_send_cq_comp_handler(struct ib_cq *cq, void *context)
 
                        send->s_wr.opcode = 0xdead;
                        send->s_wr.num_sge = 1;
-                       if (send->s_queued + HZ/2 < jiffies)
+                       if (time_after(jiffies, send->s_queued + HZ/2))
                                rds_iw_stats_inc(s_iw_tx_stalled);
 
                        /* If a RDMA operation produced an error, signal this right
index 89c91515ed0c605b6ee63996d0c0a9692ea8fe91..139239d2cb228438e29f347b33035da15d5396c0 100644 (file)
@@ -111,8 +111,7 @@ static struct ctl_table rds_iw_sysctl_table[] = {
 
 void rds_iw_sysctl_exit(void)
 {
-       if (rds_iw_sysctl_hdr)
-               unregister_net_sysctl_table(rds_iw_sysctl_hdr);
+       unregister_net_sysctl_table(rds_iw_sysctl_hdr);
 }
 
 int rds_iw_sysctl_init(void)
index b5cb2aa08f33aa62ac5bff73684fc228f74dce7e..c3b0cd43eb56689e395581c4757402bad531e271 100644 (file)
@@ -94,8 +94,7 @@ static struct ctl_table rds_sysctl_rds_table[] = {
 
 void rds_sysctl_exit(void)
 {
-       if (rds_sysctl_reg_table)
-               unregister_net_sysctl_table(rds_sysctl_reg_table);
+       unregister_net_sysctl_table(rds_sysctl_reg_table);
 }
 
 int rds_sysctl_init(void)
index 8a5ba5add4bcd60e59a9b2468df88812212012f4..648778aef1a254b9739443e93012798a134d0d86 100644 (file)
@@ -948,7 +948,7 @@ static int tc_ctl_action(struct sk_buff *skb, struct nlmsghdr *n)
        u32 portid = skb ? NETLINK_CB(skb).portid : 0;
        int ret = 0, ovr = 0;
 
-       if ((n->nlmsg_type != RTM_GETACTION) && !capable(CAP_NET_ADMIN))
+       if ((n->nlmsg_type != RTM_GETACTION) && !netlink_capable(skb, CAP_NET_ADMIN))
                return -EPERM;
 
        ret = nlmsg_parse(n, sizeof(struct tcamsg), tca, TCA_ACT_MAX, NULL);
index 29a30a14c31596cc51028be8db46eb4df9756f6e..45527e6b52dbf396cbb7415bb0613152a8320096 100644 (file)
@@ -134,7 +134,8 @@ static int tc_ctl_tfilter(struct sk_buff *skb, struct nlmsghdr *n)
        int err;
        int tp_created = 0;
 
-       if ((n->nlmsg_type != RTM_GETTFILTER) && !capable(CAP_NET_ADMIN))
+       if ((n->nlmsg_type != RTM_GETTFILTER) &&
+           !netlink_ns_capable(skb, net->user_ns, CAP_NET_ADMIN))
                return -EPERM;
 
 replay:
@@ -317,7 +318,8 @@ replay:
                }
        }
 
-       err = tp->ops->change(net, skb, tp, cl, t->tcm_handle, tca, &fh);
+       err = tp->ops->change(net, skb, tp, cl, t->tcm_handle, tca, &fh,
+                             n->nlmsg_flags & NLM_F_CREATE ? TCA_ACT_NOREPLACE : TCA_ACT_REPLACE);
        if (err == 0) {
                if (tp_created) {
                        spin_lock_bh(root_lock);
@@ -504,7 +506,7 @@ void tcf_exts_destroy(struct tcf_proto *tp, struct tcf_exts *exts)
 EXPORT_SYMBOL(tcf_exts_destroy);
 
 int tcf_exts_validate(struct net *net, struct tcf_proto *tp, struct nlattr **tb,
-                 struct nlattr *rate_tlv, struct tcf_exts *exts)
+                 struct nlattr *rate_tlv, struct tcf_exts *exts, bool ovr)
 {
 #ifdef CONFIG_NET_CLS_ACT
        {
@@ -513,7 +515,7 @@ int tcf_exts_validate(struct net *net, struct tcf_proto *tp, struct nlattr **tb,
                INIT_LIST_HEAD(&exts->actions);
                if (exts->police && tb[exts->police]) {
                        act = tcf_action_init_1(net, tb[exts->police], rate_tlv,
-                                               "police", TCA_ACT_NOREPLACE,
+                                               "police", ovr,
                                                TCA_ACT_BIND);
                        if (IS_ERR(act))
                                return PTR_ERR(act);
@@ -523,7 +525,7 @@ int tcf_exts_validate(struct net *net, struct tcf_proto *tp, struct nlattr **tb,
                } else if (exts->action && tb[exts->action]) {
                        int err;
                        err = tcf_action_init(net, tb[exts->action], rate_tlv,
-                                             NULL, TCA_ACT_NOREPLACE,
+                                             NULL, ovr,
                                              TCA_ACT_BIND, &exts->actions);
                        if (err)
                                return err;
@@ -543,14 +545,12 @@ void tcf_exts_change(struct tcf_proto *tp, struct tcf_exts *dst,
                     struct tcf_exts *src)
 {
 #ifdef CONFIG_NET_CLS_ACT
-       if (!list_empty(&src->actions)) {
-               LIST_HEAD(tmp);
-               tcf_tree_lock(tp);
-               list_splice_init(&dst->actions, &tmp);
-               list_splice(&src->actions, &dst->actions);
-               tcf_tree_unlock(tp);
-               tcf_action_destroy(&tmp, TCA_ACT_UNBIND);
-       }
+       LIST_HEAD(tmp);
+       tcf_tree_lock(tp);
+       list_splice_init(&dst->actions, &tmp);
+       list_splice(&src->actions, &dst->actions);
+       tcf_tree_unlock(tp);
+       tcf_action_destroy(&tmp, TCA_ACT_UNBIND);
 #endif
 }
 EXPORT_SYMBOL(tcf_exts_change);
index e98ca99c202bb5af6db77260f0996e6cacb098cf..0ae1813e3e90d55a1bf5993364502cebd58c1b22 100644 (file)
@@ -130,14 +130,14 @@ static const struct nla_policy basic_policy[TCA_BASIC_MAX + 1] = {
 static int basic_set_parms(struct net *net, struct tcf_proto *tp,
                           struct basic_filter *f, unsigned long base,
                           struct nlattr **tb,
-                          struct nlattr *est)
+                          struct nlattr *est, bool ovr)
 {
        int err;
        struct tcf_exts e;
        struct tcf_ematch_tree t;
 
        tcf_exts_init(&e, TCA_BASIC_ACT, TCA_BASIC_POLICE);
-       err = tcf_exts_validate(net, tp, tb, est, &e);
+       err = tcf_exts_validate(net, tp, tb, est, &e, ovr);
        if (err < 0)
                return err;
 
@@ -161,7 +161,7 @@ errout:
 
 static int basic_change(struct net *net, struct sk_buff *in_skb,
                        struct tcf_proto *tp, unsigned long base, u32 handle,
-                       struct nlattr **tca, unsigned long *arg)
+                       struct nlattr **tca, unsigned long *arg, bool ovr)
 {
        int err;
        struct basic_head *head = tp->root;
@@ -179,7 +179,7 @@ static int basic_change(struct net *net, struct sk_buff *in_skb,
        if (f != NULL) {
                if (handle && f->handle != handle)
                        return -EINVAL;
-               return basic_set_parms(net, tp, f, base, tb, tca[TCA_RATE]);
+               return basic_set_parms(net, tp, f, base, tb, tca[TCA_RATE], ovr);
        }
 
        err = -ENOBUFS;
@@ -206,7 +206,7 @@ static int basic_change(struct net *net, struct sk_buff *in_skb,
                f->handle = head->hgenerator;
        }
 
-       err = basic_set_parms(net, tp, f, base, tb, tca[TCA_RATE]);
+       err = basic_set_parms(net, tp, f, base, tb, tca[TCA_RATE], ovr);
        if (err < 0)
                goto errout;
 
index 8e3cf49118e3a2297214de12313bb499cbfe9054..16186965af97fd4cc7cad6725842aba8a56225dc 100644 (file)
@@ -156,7 +156,7 @@ static void cls_bpf_put(struct tcf_proto *tp, unsigned long f)
 static int cls_bpf_modify_existing(struct net *net, struct tcf_proto *tp,
                                   struct cls_bpf_prog *prog,
                                   unsigned long base, struct nlattr **tb,
-                                  struct nlattr *est)
+                                  struct nlattr *est, bool ovr)
 {
        struct sock_filter *bpf_ops, *bpf_old;
        struct tcf_exts exts;
@@ -170,7 +170,7 @@ static int cls_bpf_modify_existing(struct net *net, struct tcf_proto *tp,
                return -EINVAL;
 
        tcf_exts_init(&exts, TCA_BPF_ACT, TCA_BPF_POLICE);
-       ret = tcf_exts_validate(net, tp, tb, est, &exts);
+       ret = tcf_exts_validate(net, tp, tb, est, &exts, ovr);
        if (ret < 0)
                return ret;
 
@@ -242,7 +242,7 @@ static u32 cls_bpf_grab_new_handle(struct tcf_proto *tp,
 static int cls_bpf_change(struct net *net, struct sk_buff *in_skb,
                          struct tcf_proto *tp, unsigned long base,
                          u32 handle, struct nlattr **tca,
-                         unsigned long *arg)
+                         unsigned long *arg, bool ovr)
 {
        struct cls_bpf_head *head = tp->root;
        struct cls_bpf_prog *prog = (struct cls_bpf_prog *) *arg;
@@ -260,7 +260,7 @@ static int cls_bpf_change(struct net *net, struct sk_buff *in_skb,
                if (handle && prog->handle != handle)
                        return -EINVAL;
                return cls_bpf_modify_existing(net, tp, prog, base, tb,
-                                              tca[TCA_RATE]);
+                                              tca[TCA_RATE], ovr);
        }
 
        prog = kzalloc(sizeof(*prog), GFP_KERNEL);
@@ -277,7 +277,7 @@ static int cls_bpf_change(struct net *net, struct sk_buff *in_skb,
                goto errout;
        }
 
-       ret = cls_bpf_modify_existing(net, tp, prog, base, tb, tca[TCA_RATE]);
+       ret = cls_bpf_modify_existing(net, tp, prog, base, tb, tca[TCA_RATE], ovr);
        if (ret < 0)
                goto errout;
 
index 8e2158ab551c0c9d7258ffae3e474788bfc64565..cacf01bd04f0a96660050c0e71da0840c8af0f95 100644 (file)
@@ -83,7 +83,7 @@ static const struct nla_policy cgroup_policy[TCA_CGROUP_MAX + 1] = {
 static int cls_cgroup_change(struct net *net, struct sk_buff *in_skb,
                             struct tcf_proto *tp, unsigned long base,
                             u32 handle, struct nlattr **tca,
-                            unsigned long *arg)
+                            unsigned long *arg, bool ovr)
 {
        struct nlattr *tb[TCA_CGROUP_MAX + 1];
        struct cls_cgroup_head *head = tp->root;
@@ -119,7 +119,7 @@ static int cls_cgroup_change(struct net *net, struct sk_buff *in_skb,
                return err;
 
        tcf_exts_init(&e, TCA_CGROUP_ACT, TCA_CGROUP_POLICE);
-       err = tcf_exts_validate(net, tp, tb, tca[TCA_RATE], &e);
+       err = tcf_exts_validate(net, tp, tb, tca[TCA_RATE], &e, ovr);
        if (err < 0)
                return err;
 
index 257029c5433298f62101533ada3a81c246eb0d7a..35be16f7c192dc8d5d2ebdd38c2b27a144c41076 100644 (file)
@@ -349,7 +349,7 @@ static const struct nla_policy flow_policy[TCA_FLOW_MAX + 1] = {
 static int flow_change(struct net *net, struct sk_buff *in_skb,
                       struct tcf_proto *tp, unsigned long base,
                       u32 handle, struct nlattr **tca,
-                      unsigned long *arg)
+                      unsigned long *arg, bool ovr)
 {
        struct flow_head *head = tp->root;
        struct flow_filter *f;
@@ -393,7 +393,7 @@ static int flow_change(struct net *net, struct sk_buff *in_skb,
        }
 
        tcf_exts_init(&e, TCA_FLOW_ACT, TCA_FLOW_POLICE);
-       err = tcf_exts_validate(net, tp, tb, tca[TCA_RATE], &e);
+       err = tcf_exts_validate(net, tp, tb, tca[TCA_RATE], &e, ovr);
        if (err < 0)
                return err;
 
index 63a3ce75c02ee959fe67d6b203e01420d86b03ad..861b03ccfed0a55007ceb001a297b05906b36ed3 100644 (file)
@@ -169,7 +169,7 @@ static const struct nla_policy fw_policy[TCA_FW_MAX + 1] = {
 
 static int
 fw_change_attrs(struct net *net, struct tcf_proto *tp, struct fw_filter *f,
-       struct nlattr **tb, struct nlattr **tca, unsigned long base)
+       struct nlattr **tb, struct nlattr **tca, unsigned long base, bool ovr)
 {
        struct fw_head *head = tp->root;
        struct tcf_exts e;
@@ -177,7 +177,7 @@ fw_change_attrs(struct net *net, struct tcf_proto *tp, struct fw_filter *f,
        int err;
 
        tcf_exts_init(&e, TCA_FW_ACT, TCA_FW_POLICE);
-       err = tcf_exts_validate(net, tp, tb, tca[TCA_RATE], &e);
+       err = tcf_exts_validate(net, tp, tb, tca[TCA_RATE], &e, ovr);
        if (err < 0)
                return err;
 
@@ -218,7 +218,7 @@ static int fw_change(struct net *net, struct sk_buff *in_skb,
                     struct tcf_proto *tp, unsigned long base,
                     u32 handle,
                     struct nlattr **tca,
-                    unsigned long *arg)
+                    unsigned long *arg, bool ovr)
 {
        struct fw_head *head = tp->root;
        struct fw_filter *f = (struct fw_filter *) *arg;
@@ -236,7 +236,7 @@ static int fw_change(struct net *net, struct sk_buff *in_skb,
        if (f != NULL) {
                if (f->id != handle && handle)
                        return -EINVAL;
-               return fw_change_attrs(net, tp, f, tb, tca, base);
+               return fw_change_attrs(net, tp, f, tb, tca, base, ovr);
        }
 
        if (!handle)
@@ -264,7 +264,7 @@ static int fw_change(struct net *net, struct sk_buff *in_skb,
        tcf_exts_init(&f->exts, TCA_FW_ACT, TCA_FW_POLICE);
        f->id = handle;
 
-       err = fw_change_attrs(net, tp, f, tb, tca, base);
+       err = fw_change_attrs(net, tp, f, tb, tca, base, ovr);
        if (err < 0)
                goto errout;
 
index 1ad3068f2ce16e2c6ba15985c40cf899a7030ba7..dd9fc2523c76a2b0b9fe4c5225328cc29c1649f6 100644 (file)
@@ -333,7 +333,8 @@ static const struct nla_policy route4_policy[TCA_ROUTE4_MAX + 1] = {
 static int route4_set_parms(struct net *net, struct tcf_proto *tp,
                            unsigned long base, struct route4_filter *f,
                            u32 handle, struct route4_head *head,
-                           struct nlattr **tb, struct nlattr *est, int new)
+                           struct nlattr **tb, struct nlattr *est, int new,
+                           bool ovr)
 {
        int err;
        u32 id = 0, to = 0, nhandle = 0x8000;
@@ -343,7 +344,7 @@ static int route4_set_parms(struct net *net, struct tcf_proto *tp,
        struct tcf_exts e;
 
        tcf_exts_init(&e, TCA_ROUTE4_ACT, TCA_ROUTE4_POLICE);
-       err = tcf_exts_validate(net, tp, tb, est, &e);
+       err = tcf_exts_validate(net, tp, tb, est, &e, ovr);
        if (err < 0)
                return err;
 
@@ -428,7 +429,7 @@ static int route4_change(struct net *net, struct sk_buff *in_skb,
                       struct tcf_proto *tp, unsigned long base,
                       u32 handle,
                       struct nlattr **tca,
-                      unsigned long *arg)
+                      unsigned long *arg, bool ovr)
 {
        struct route4_head *head = tp->root;
        struct route4_filter *f, *f1, **fp;
@@ -455,7 +456,7 @@ static int route4_change(struct net *net, struct sk_buff *in_skb,
                        old_handle = f->handle;
 
                err = route4_set_parms(net, tp, base, f, handle, head, tb,
-                       tca[TCA_RATE], 0);
+                       tca[TCA_RATE], 0, ovr);
                if (err < 0)
                        return err;
 
@@ -479,7 +480,7 @@ static int route4_change(struct net *net, struct sk_buff *in_skb,
 
        tcf_exts_init(&f->exts, TCA_ROUTE4_ACT, TCA_ROUTE4_POLICE);
        err = route4_set_parms(net, tp, base, f, handle, head, tb,
-               tca[TCA_RATE], 1);
+               tca[TCA_RATE], 1, ovr);
        if (err < 0)
                goto errout;
 
index 19f8e5dfa8bdaebcd9ab903050047e7e49690530..1020e233a5d6c74092fb153133b1bfed7f4177a9 100644 (file)
@@ -415,7 +415,7 @@ static int rsvp_change(struct net *net, struct sk_buff *in_skb,
                       struct tcf_proto *tp, unsigned long base,
                       u32 handle,
                       struct nlattr **tca,
-                      unsigned long *arg)
+                      unsigned long *arg, bool ovr)
 {
        struct rsvp_head *data = tp->root;
        struct rsvp_filter *f, **fp;
@@ -436,7 +436,7 @@ static int rsvp_change(struct net *net, struct sk_buff *in_skb,
                return err;
 
        tcf_exts_init(&e, TCA_RSVP_ACT, TCA_RSVP_POLICE);
-       err = tcf_exts_validate(net, tp, tb, tca[TCA_RATE], &e);
+       err = tcf_exts_validate(net, tp, tb, tca[TCA_RATE], &e, ovr);
        if (err < 0)
                return err;
 
index eed8404443d8f0145942c3e459b79934ada9c48d..d11d0a4fbe34f6af8390e9752468b2446079733a 100644 (file)
@@ -192,7 +192,7 @@ static int
 tcindex_set_parms(struct net *net, struct tcf_proto *tp, unsigned long base,
                  u32 handle, struct tcindex_data *p,
                  struct tcindex_filter_result *r, struct nlattr **tb,
-                struct nlattr *est)
+                 struct nlattr *est, bool ovr)
 {
        int err, balloc = 0;
        struct tcindex_filter_result new_filter_result, *old_r = r;
@@ -202,7 +202,7 @@ tcindex_set_parms(struct net *net, struct tcf_proto *tp, unsigned long base,
        struct tcf_exts e;
 
        tcf_exts_init(&e, TCA_TCINDEX_ACT, TCA_TCINDEX_POLICE);
-       err = tcf_exts_validate(net, tp, tb, est, &e);
+       err = tcf_exts_validate(net, tp, tb, est, &e, ovr);
        if (err < 0)
                return err;
 
@@ -331,7 +331,7 @@ errout:
 static int
 tcindex_change(struct net *net, struct sk_buff *in_skb,
               struct tcf_proto *tp, unsigned long base, u32 handle,
-              struct nlattr **tca, unsigned long *arg)
+              struct nlattr **tca, unsigned long *arg, bool ovr)
 {
        struct nlattr *opt = tca[TCA_OPTIONS];
        struct nlattr *tb[TCA_TCINDEX_MAX + 1];
@@ -351,7 +351,7 @@ tcindex_change(struct net *net, struct sk_buff *in_skb,
                return err;
 
        return tcindex_set_parms(net, tp, base, handle, p, r, tb,
-                                tca[TCA_RATE]);
+                                tca[TCA_RATE], ovr);
 }
 
 
index 84c28daff8484f643e5bed4176f6b225eec34e66..c39b583ace3229d4bae6a7b3774593e5eebd7141 100644 (file)
@@ -486,13 +486,13 @@ static const struct nla_policy u32_policy[TCA_U32_MAX + 1] = {
 static int u32_set_parms(struct net *net, struct tcf_proto *tp,
                         unsigned long base, struct tc_u_hnode *ht,
                         struct tc_u_knode *n, struct nlattr **tb,
-                        struct nlattr *est)
+                        struct nlattr *est, bool ovr)
 {
        int err;
        struct tcf_exts e;
 
        tcf_exts_init(&e, TCA_U32_ACT, TCA_U32_POLICE);
-       err = tcf_exts_validate(net, tp, tb, est, &e);
+       err = tcf_exts_validate(net, tp, tb, est, &e, ovr);
        if (err < 0)
                return err;
 
@@ -545,7 +545,7 @@ errout:
 static int u32_change(struct net *net, struct sk_buff *in_skb,
                      struct tcf_proto *tp, unsigned long base, u32 handle,
                      struct nlattr **tca,
-                     unsigned long *arg)
+                     unsigned long *arg, bool ovr)
 {
        struct tc_u_common *tp_c = tp->data;
        struct tc_u_hnode *ht;
@@ -569,7 +569,7 @@ static int u32_change(struct net *net, struct sk_buff *in_skb,
                        return -EINVAL;
 
                return u32_set_parms(net, tp, base, n->ht_up, n, tb,
-                                    tca[TCA_RATE]);
+                                    tca[TCA_RATE], ovr);
        }
 
        if (tb[TCA_U32_DIVISOR]) {
@@ -656,7 +656,7 @@ static int u32_change(struct net *net, struct sk_buff *in_skb,
        }
 #endif
 
-       err = u32_set_parms(net, tp, base, ht, n, tb, tca[TCA_RATE]);
+       err = u32_set_parms(net, tp, base, ht, n, tb, tca[TCA_RATE], ovr);
        if (err == 0) {
                struct tc_u_knode **ins;
                for (ins = &ht->ht[TC_U32_HASH(handle)]; *ins; ins = &(*ins)->next)
index a0b84e0e22deb4c9e998499b1e202ebaafda95f8..fd14df56e5ffdc2d96d61abbe55b2607c95179c5 100644 (file)
@@ -1084,7 +1084,8 @@ static int tc_get_qdisc(struct sk_buff *skb, struct nlmsghdr *n)
        struct Qdisc *p = NULL;
        int err;
 
-       if ((n->nlmsg_type != RTM_GETQDISC) && !capable(CAP_NET_ADMIN))
+       if ((n->nlmsg_type != RTM_GETQDISC) &&
+           !netlink_ns_capable(skb, net->user_ns, CAP_NET_ADMIN))
                return -EPERM;
 
        err = nlmsg_parse(n, sizeof(*tcm), tca, TCA_MAX, NULL);
@@ -1151,7 +1152,7 @@ static int tc_modify_qdisc(struct sk_buff *skb, struct nlmsghdr *n)
        struct Qdisc *q, *p;
        int err;
 
-       if (!capable(CAP_NET_ADMIN))
+       if (!netlink_ns_capable(skb, net->user_ns, CAP_NET_ADMIN))
                return -EPERM;
 
 replay:
@@ -1490,7 +1491,8 @@ static int tc_ctl_tclass(struct sk_buff *skb, struct nlmsghdr *n)
        u32 qid;
        int err;
 
-       if ((n->nlmsg_type != RTM_GETTCLASS) && !capable(CAP_NET_ADMIN))
+       if ((n->nlmsg_type != RTM_GETTCLASS) &&
+           !netlink_ns_capable(skb, net->user_ns, CAP_NET_ADMIN))
                return -EPERM;
 
        err = nlmsg_parse(n, sizeof(*tcm), tca, TCA_MAX, NULL);
index edee03d922e28678cc4f4ba843f600236cf06f08..6aab8619bbb002570d206d9ff6bb9c90eb82261a 100644 (file)
@@ -414,7 +414,7 @@ static int hhf_enqueue(struct sk_buff *skb, struct Qdisc *sch)
                }
                bucket->deficit = weight * q->quantum;
        }
-       if (++sch->q.qlen < sch->limit)
+       if (++sch->q.qlen <= sch->limit)
                return NET_XMIT_SUCCESS;
 
        q->drop_overlimit++;
@@ -553,11 +553,6 @@ static int hhf_change(struct Qdisc *sch, struct nlattr *opt)
        if (err < 0)
                return err;
 
-       sch_tree_lock(sch);
-
-       if (tb[TCA_HHF_BACKLOG_LIMIT])
-               sch->limit = nla_get_u32(tb[TCA_HHF_BACKLOG_LIMIT]);
-
        if (tb[TCA_HHF_QUANTUM])
                new_quantum = nla_get_u32(tb[TCA_HHF_QUANTUM]);
 
@@ -567,6 +562,12 @@ static int hhf_change(struct Qdisc *sch, struct nlattr *opt)
        non_hh_quantum = (u64)new_quantum * new_hhf_non_hh_weight;
        if (non_hh_quantum > INT_MAX)
                return -EINVAL;
+
+       sch_tree_lock(sch);
+
+       if (tb[TCA_HHF_BACKLOG_LIMIT])
+               sch->limit = nla_get_u32(tb[TCA_HHF_BACKLOG_LIMIT]);
+
        q->quantum = new_quantum;
        q->hhf_non_hh_weight = new_hhf_non_hh_weight;
 
index 2b1738ef9394537589b403f7d299181e18fb2315..4dc5d9e083115983595edcdf10641e78d1fdfb41 100644 (file)
@@ -216,7 +216,7 @@ static int sctp_v6_xmit(struct sk_buff *skb, struct sctp_transport *transport)
        IP6_ECN_flow_xmit(sk, fl6->flowlabel);
 
        if (!(transport->param_flags & SPP_PMTUD_ENABLE))
-               skb->local_df = 1;
+               skb->ignore_df = 1;
 
        SCTP_INC_STATS(sock_net(sk), SCTP_MIB_OUTSCTPPACKS);
 
index 0f4d15fc2627bcccb546ee2da883f812daa4e4a1..01ab8e0723f04ea845ba81045228786ef07c97ad 100644 (file)
@@ -591,7 +591,7 @@ int sctp_packet_transmit(struct sctp_packet *packet)
 
        pr_debug("***sctp_transmit_packet*** skb->len:%d\n", nskb->len);
 
-       nskb->local_df = packet->ipfragok;
+       nskb->ignore_df = packet->ipfragok;
        tp->af_specific->sctp_xmit(nskb, tp);
 
 out:
index 0947f1e15eb88a0381434a05da082bf3d6fd9797..34229ee7f379902b16a7659f82545f10e5908b17 100644 (file)
@@ -78,7 +78,7 @@ static int sctp_snmp_seq_show(struct seq_file *seq, void *v)
 
        for (i = 0; sctp_snmp_list[i].name != NULL; i++)
                seq_printf(seq, "%-32s\t%ld\n", sctp_snmp_list[i].name,
-                          snmp_fold_field((void __percpu **)net->sctp.sctp_statistics,
+                          snmp_fold_field(net->sctp.sctp_statistics,
                                      sctp_snmp_list[i].entry));
 
        return 0;
index c09757fbf8039e76c935c1dbf33ab2b19be228b1..af5afca4b85a2df42c998474fbae14c057509c13 100644 (file)
@@ -491,8 +491,13 @@ static void sctp_v4_get_dst(struct sctp_transport *t, union sctp_addr *saddr,
                        continue;
                if ((laddr->state == SCTP_ADDR_SRC) &&
                    (AF_INET == laddr->a.sa.sa_family)) {
-                       fl4->saddr = laddr->a.v4.sin_addr.s_addr;
                        fl4->fl4_sport = laddr->a.v4.sin_port;
+                       flowi4_update_output(fl4,
+                                            asoc->base.sk->sk_bound_dev_if,
+                                            RT_CONN_FLAGS(asoc->base.sk),
+                                            daddr->v4.sin_addr.s_addr,
+                                            laddr->a.v4.sin_addr.s_addr);
+
                        rt = ip_route_output_key(sock_net(sk), fl4);
                        if (!IS_ERR(rt)) {
                                dst = &rt->dst;
@@ -1100,14 +1105,15 @@ int sctp_register_pf(struct sctp_pf *pf, sa_family_t family)
 
 static inline int init_sctp_mibs(struct net *net)
 {
-       return snmp_mib_init((void __percpu **)net->sctp.sctp_statistics,
-                            sizeof(struct sctp_mib),
-                            __alignof__(struct sctp_mib));
+       net->sctp.sctp_statistics = alloc_percpu(struct sctp_mib);
+       if (!net->sctp.sctp_statistics)
+               return -ENOMEM;
+       return 0;
 }
 
 static inline void cleanup_sctp_mibs(struct net *net)
 {
-       snmp_mib_free((void __percpu **)net->sctp.sctp_statistics);
+       free_percpu(net->sctp.sctp_statistics);
 }
 
 static void sctp_v4_pf_init(void)
index 5d6883ff00c3b7056639f06254caffcb14349e27..fef2acdf4a2e675c55dc9fbf2124d132499b89e3 100644 (file)
@@ -496,11 +496,10 @@ static void sctp_do_8_2_transport_strike(sctp_cmd_seq_t *commands,
 
        /* If the transport error count is greater than the pf_retrans
         * threshold, and less than pathmaxrtx, and if the current state
-        * is not SCTP_UNCONFIRMED, then mark this transport as Partially
-        * Failed, see SCTP Quick Failover Draft, section 5.1
+        * is SCTP_ACTIVE, then mark this transport as Partially Failed,
+        * see SCTP Quick Failover Draft, section 5.1
         */
-       if ((transport->state != SCTP_PF) &&
-          (transport->state != SCTP_UNCONFIRMED) &&
+       if ((transport->state == SCTP_ACTIVE) &&
           (asoc->pf_retrans < transport->pathmaxrxt) &&
           (transport->error_count > asoc->pf_retrans)) {
 
index e37b2cbbf177da9739d54a2b101d3a4e74299745..2af76eaba8f784e4d7b4276c8c2ef46b1ef30925 100644 (file)
@@ -5946,8 +5946,9 @@ static long sctp_get_port_local(struct sock *sk, union sctp_addr *addr)
                /* Search for an available port. */
                int low, high, remaining, index;
                unsigned int rover;
+               struct net *net = sock_net(sk);
 
-               inet_get_local_port_range(sock_net(sk), &low, &high);
+               inet_get_local_port_range(net, &low, &high);
                remaining = (high - low) + 1;
                rover = prandom_u32() % remaining + low;
 
@@ -5955,7 +5956,7 @@ static long sctp_get_port_local(struct sock *sk, union sctp_addr *addr)
                        rover++;
                        if ((rover < low) || (rover > high))
                                rover = low;
-                       if (inet_is_reserved_local_port(rover))
+                       if (inet_is_local_reserved_port(net, rover))
                                continue;
                        index = sctp_phashfn(sock_net(sk), rover);
                        head = &sctp_port_hashtable[index];
index c82fdc1eab7c359dbee3812db3f3707fa5c65560..7e5eb75549902eeeb2c0c0889daca8ee53317ccf 100644 (file)
@@ -436,20 +436,21 @@ static int proc_sctp_do_auth(struct ctl_table *ctl, int write,
 
 int sctp_sysctl_net_register(struct net *net)
 {
-       struct ctl_table *table = sctp_net_table;
-
-       if (!net_eq(net, &init_net)) {
-               int i;
+       struct ctl_table *table;
+       int i;
 
-               table = kmemdup(sctp_net_table, sizeof(sctp_net_table), GFP_KERNEL);
-               if (!table)
-                       return -ENOMEM;
+       table = kmemdup(sctp_net_table, sizeof(sctp_net_table), GFP_KERNEL);
+       if (!table)
+               return -ENOMEM;
 
-               for (i = 0; table[i].data; i++)
-                       table[i].data += (char *)(&net->sctp) - (char *)&init_net.sctp;
-       }
+       for (i = 0; table[i].data; i++)
+               table[i].data += (char *)(&net->sctp) - (char *)&init_net.sctp;
 
        net->sctp.sysctl_header = register_net_sysctl(net, "net/sctp", table);
+       if (net->sctp.sysctl_header == NULL) {
+               kfree(table);
+               return -ENOMEM;
+       }
        return 0;
 }
 
index b282f7130d2bb51f0dee12e4800a9a0ad54a33a7..a080c66d819a032233a963512d849f757cc979e2 100644 (file)
@@ -5,7 +5,7 @@
 obj-$(CONFIG_TIPC) := tipc.o
 
 tipc-y += addr.o bcast.o bearer.o config.o \
-          core.o handler.o link.o discover.o msg.o  \
+          core.o link.o discover.o msg.o  \
           name_distr.o  subscr.o name_table.o net.o  \
           netlink.o node.o node_subscr.o port.o ref.o  \
           socket.o log.o eth_media.o server.o
index 95ab5ef92920fddf34c02478973b98ebbe96ba59..671f9817b4f4c3a6e8c0a0a9eec9e96e1cb9b359 100644 (file)
@@ -71,7 +71,7 @@ struct tipc_bcbearer_pair {
  * Note: The fields labelled "temporary" are incorporated into the bearer
  * to avoid consuming potentially limited stack space through the use of
  * large local variables within multicast routines.  Concurrent access is
- * prevented through use of the spinlock "bc_lock".
+ * prevented through use of the spinlock "bclink_lock".
  */
 struct tipc_bcbearer {
        struct tipc_bearer bearer;
@@ -84,34 +84,64 @@ struct tipc_bcbearer {
 
 /**
  * struct tipc_bclink - link used for broadcast messages
+ * @lock: spinlock governing access to structure
  * @link: (non-standard) broadcast link structure
  * @node: (non-standard) node structure representing b'cast link's peer node
+ * @flags: represent bclink states
  * @bcast_nodes: map of broadcast-capable nodes
  * @retransmit_to: node that most recently requested a retransmit
  *
  * Handles sequence numbering, fragmentation, bundling, etc.
  */
 struct tipc_bclink {
+       spinlock_t lock;
        struct tipc_link link;
        struct tipc_node node;
+       unsigned int flags;
        struct tipc_node_map bcast_nodes;
        struct tipc_node *retransmit_to;
 };
 
-static struct tipc_bcbearer bcast_bearer;
-static struct tipc_bclink bcast_link;
-
-static struct tipc_bcbearer *bcbearer = &bcast_bearer;
-static struct tipc_bclink *bclink = &bcast_link;
-static struct tipc_link *bcl = &bcast_link.link;
-
-static DEFINE_SPINLOCK(bc_lock);
+static struct tipc_bcbearer *bcbearer;
+static struct tipc_bclink *bclink;
+static struct tipc_link *bcl;
 
 const char tipc_bclink_name[] = "broadcast-link";
 
 static void tipc_nmap_diff(struct tipc_node_map *nm_a,
                           struct tipc_node_map *nm_b,
                           struct tipc_node_map *nm_diff);
+static void tipc_nmap_add(struct tipc_node_map *nm_ptr, u32 node);
+static void tipc_nmap_remove(struct tipc_node_map *nm_ptr, u32 node);
+
+static void tipc_bclink_lock(void)
+{
+       spin_lock_bh(&bclink->lock);
+}
+
+static void tipc_bclink_unlock(void)
+{
+       struct tipc_node *node = NULL;
+
+       if (likely(!bclink->flags)) {
+               spin_unlock_bh(&bclink->lock);
+               return;
+       }
+
+       if (bclink->flags & TIPC_BCLINK_RESET) {
+               bclink->flags &= ~TIPC_BCLINK_RESET;
+               node = tipc_bclink_retransmit_to();
+       }
+       spin_unlock_bh(&bclink->lock);
+
+       if (node)
+               tipc_link_reset_all(node);
+}
+
+void tipc_bclink_set_flags(unsigned int flags)
+{
+       bclink->flags |= flags;
+}
 
 static u32 bcbuf_acks(struct sk_buff *buf)
 {
@@ -130,16 +160,16 @@ static void bcbuf_decr_acks(struct sk_buff *buf)
 
 void tipc_bclink_add_node(u32 addr)
 {
-       spin_lock_bh(&bc_lock);
+       tipc_bclink_lock();
        tipc_nmap_add(&bclink->bcast_nodes, addr);
-       spin_unlock_bh(&bc_lock);
+       tipc_bclink_unlock();
 }
 
 void tipc_bclink_remove_node(u32 addr)
 {
-       spin_lock_bh(&bc_lock);
+       tipc_bclink_lock();
        tipc_nmap_remove(&bclink->bcast_nodes, addr);
-       spin_unlock_bh(&bc_lock);
+       tipc_bclink_unlock();
 }
 
 static void bclink_set_last_sent(void)
@@ -165,7 +195,7 @@ static void bclink_update_last_sent(struct tipc_node *node, u32 seqno)
 /**
  * tipc_bclink_retransmit_to - get most recent node to request retransmission
  *
- * Called with bc_lock locked
+ * Called with bclink_lock locked
  */
 struct tipc_node *tipc_bclink_retransmit_to(void)
 {
@@ -177,7 +207,7 @@ struct tipc_node *tipc_bclink_retransmit_to(void)
  * @after: sequence number of last packet to *not* retransmit
  * @to: sequence number of last packet to retransmit
  *
- * Called with bc_lock locked
+ * Called with bclink_lock locked
  */
 static void bclink_retransmit_pkt(u32 after, u32 to)
 {
@@ -194,7 +224,7 @@ static void bclink_retransmit_pkt(u32 after, u32 to)
  * @n_ptr: node that sent acknowledgement info
  * @acked: broadcast sequence # that has been acknowledged
  *
- * Node is locked, bc_lock unlocked.
+ * Node is locked, bclink_lock unlocked.
  */
 void tipc_bclink_acknowledge(struct tipc_node *n_ptr, u32 acked)
 {
@@ -202,8 +232,7 @@ void tipc_bclink_acknowledge(struct tipc_node *n_ptr, u32 acked)
        struct sk_buff *next;
        unsigned int released = 0;
 
-       spin_lock_bh(&bc_lock);
-
+       tipc_bclink_lock();
        /* Bail out if tx queue is empty (no clean up is required) */
        crs = bcl->first_out;
        if (!crs)
@@ -267,13 +296,13 @@ void tipc_bclink_acknowledge(struct tipc_node *n_ptr, u32 acked)
        if (unlikely(released && !list_empty(&bcl->waiting_ports)))
                tipc_link_wakeup_ports(bcl, 0);
 exit:
-       spin_unlock_bh(&bc_lock);
+       tipc_bclink_unlock();
 }
 
 /**
  * tipc_bclink_update_link_state - update broadcast link state
  *
- * tipc_net_lock and node lock set
+ * RCU and node lock set
  */
 void tipc_bclink_update_link_state(struct tipc_node *n_ptr, u32 last_sent)
 {
@@ -320,10 +349,10 @@ void tipc_bclink_update_link_state(struct tipc_node *n_ptr, u32 last_sent)
                                 ? buf_seqno(n_ptr->bclink.deferred_head) - 1
                                 : n_ptr->bclink.last_sent);
 
-               spin_lock_bh(&bc_lock);
-               tipc_bearer_send(&bcbearer->bearer, buf, NULL);
+               tipc_bclink_lock();
+               tipc_bearer_send(MAX_BEARERS, buf, NULL);
                bcl->stats.sent_nacks++;
-               spin_unlock_bh(&bc_lock);
+               tipc_bclink_unlock();
                kfree_skb(buf);
 
                n_ptr->bclink.oos_state++;
@@ -335,8 +364,6 @@ void tipc_bclink_update_link_state(struct tipc_node *n_ptr, u32 last_sent)
  *
  * Delay any upcoming NACK by this node if another node has already
  * requested the first message this node is going to ask for.
- *
- * Only tipc_net_lock set.
  */
 static void bclink_peek_nack(struct tipc_msg *msg)
 {
@@ -362,7 +389,7 @@ int tipc_bclink_xmit(struct sk_buff *buf)
 {
        int res;
 
-       spin_lock_bh(&bc_lock);
+       tipc_bclink_lock();
 
        if (!bclink->bcast_nodes.count) {
                res = msg_data_sz(buf_msg(buf));
@@ -377,14 +404,14 @@ int tipc_bclink_xmit(struct sk_buff *buf)
                bcl->stats.accu_queue_sz += bcl->out_queue_size;
        }
 exit:
-       spin_unlock_bh(&bc_lock);
+       tipc_bclink_unlock();
        return res;
 }
 
 /**
  * bclink_accept_pkt - accept an incoming, in-sequence broadcast packet
  *
- * Called with both sending node's lock and bc_lock taken.
+ * Called with both sending node's lock and bclink_lock taken.
  */
 static void bclink_accept_pkt(struct tipc_node *node, u32 seqno)
 {
@@ -408,7 +435,7 @@ static void bclink_accept_pkt(struct tipc_node *node, u32 seqno)
 /**
  * tipc_bclink_rcv - receive a broadcast packet, and deliver upwards
  *
- * tipc_net_lock is read_locked, no other locks set
+ * RCU is locked, no other locks set
  */
 void tipc_bclink_rcv(struct sk_buff *buf)
 {
@@ -439,12 +466,12 @@ void tipc_bclink_rcv(struct sk_buff *buf)
                if (msg_destnode(msg) == tipc_own_addr) {
                        tipc_bclink_acknowledge(node, msg_bcast_ack(msg));
                        tipc_node_unlock(node);
-                       spin_lock_bh(&bc_lock);
+                       tipc_bclink_lock();
                        bcl->stats.recv_nacks++;
                        bclink->retransmit_to = node;
                        bclink_retransmit_pkt(msg_bcgap_after(msg),
                                              msg_bcgap_to(msg));
-                       spin_unlock_bh(&bc_lock);
+                       tipc_bclink_unlock();
                } else {
                        tipc_node_unlock(node);
                        bclink_peek_nack(msg);
@@ -462,51 +489,47 @@ receive:
                /* Deliver message to destination */
 
                if (likely(msg_isdata(msg))) {
-                       spin_lock_bh(&bc_lock);
+                       tipc_bclink_lock();
                        bclink_accept_pkt(node, seqno);
-                       spin_unlock_bh(&bc_lock);
+                       tipc_bclink_unlock();
                        tipc_node_unlock(node);
                        if (likely(msg_mcast(msg)))
                                tipc_port_mcast_rcv(buf, NULL);
                        else
                                kfree_skb(buf);
                } else if (msg_user(msg) == MSG_BUNDLER) {
-                       spin_lock_bh(&bc_lock);
+                       tipc_bclink_lock();
                        bclink_accept_pkt(node, seqno);
                        bcl->stats.recv_bundles++;
                        bcl->stats.recv_bundled += msg_msgcnt(msg);
-                       spin_unlock_bh(&bc_lock);
+                       tipc_bclink_unlock();
                        tipc_node_unlock(node);
                        tipc_link_bundle_rcv(buf);
                } else if (msg_user(msg) == MSG_FRAGMENTER) {
-                       int ret;
-                       ret = tipc_link_frag_rcv(&node->bclink.reasm_head,
-                                                &node->bclink.reasm_tail,
-                                                &buf);
-                       if (ret == LINK_REASM_ERROR)
+                       tipc_buf_append(&node->bclink.reasm_buf, &buf);
+                       if (unlikely(!buf && !node->bclink.reasm_buf))
                                goto unlock;
-                       spin_lock_bh(&bc_lock);
+                       tipc_bclink_lock();
                        bclink_accept_pkt(node, seqno);
                        bcl->stats.recv_fragments++;
-                       if (ret == LINK_REASM_COMPLETE) {
+                       if (buf) {
                                bcl->stats.recv_fragmented++;
-                               /* Point msg to inner header */
                                msg = buf_msg(buf);
-                               spin_unlock_bh(&bc_lock);
+                               tipc_bclink_unlock();
                                goto receive;
                        }
-                       spin_unlock_bh(&bc_lock);
+                       tipc_bclink_unlock();
                        tipc_node_unlock(node);
                } else if (msg_user(msg) == NAME_DISTRIBUTOR) {
-                       spin_lock_bh(&bc_lock);
+                       tipc_bclink_lock();
                        bclink_accept_pkt(node, seqno);
-                       spin_unlock_bh(&bc_lock);
+                       tipc_bclink_unlock();
                        tipc_node_unlock(node);
                        tipc_named_rcv(buf);
                } else {
-                       spin_lock_bh(&bc_lock);
+                       tipc_bclink_lock();
                        bclink_accept_pkt(node, seqno);
-                       spin_unlock_bh(&bc_lock);
+                       tipc_bclink_unlock();
                        tipc_node_unlock(node);
                        kfree_skb(buf);
                }
@@ -552,14 +575,14 @@ receive:
        } else
                deferred = 0;
 
-       spin_lock_bh(&bc_lock);
+       tipc_bclink_lock();
 
        if (deferred)
                bcl->stats.deferred_recv++;
        else
                bcl->stats.duplicates++;
 
-       spin_unlock_bh(&bc_lock);
+       tipc_bclink_unlock();
 
 unlock:
        tipc_node_unlock(node);
@@ -627,13 +650,13 @@ static int tipc_bcbearer_send(struct sk_buff *buf, struct tipc_bearer *unused1,
 
                if (bp_index == 0) {
                        /* Use original buffer for first bearer */
-                       tipc_bearer_send(b, buf, &b->bcast_addr);
+                       tipc_bearer_send(b->identity, buf, &b->bcast_addr);
                } else {
                        /* Avoid concurrent buffer access */
                        tbuf = pskb_copy(buf, GFP_ATOMIC);
                        if (!tbuf)
                                break;
-                       tipc_bearer_send(b, tbuf, &b->bcast_addr);
+                       tipc_bearer_send(b->identity, tbuf, &b->bcast_addr);
                        kfree_skb(tbuf); /* Bearer keeps a clone */
                }
 
@@ -655,20 +678,27 @@ static int tipc_bcbearer_send(struct sk_buff *buf, struct tipc_bearer *unused1,
 /**
  * tipc_bcbearer_sort - create sets of bearer pairs used by broadcast bearer
  */
-void tipc_bcbearer_sort(void)
+void tipc_bcbearer_sort(struct tipc_node_map *nm_ptr, u32 node, bool action)
 {
        struct tipc_bcbearer_pair *bp_temp = bcbearer->bpairs_temp;
        struct tipc_bcbearer_pair *bp_curr;
+       struct tipc_bearer *b;
        int b_index;
        int pri;
 
-       spin_lock_bh(&bc_lock);
+       tipc_bclink_lock();
+
+       if (action)
+               tipc_nmap_add(nm_ptr, node);
+       else
+               tipc_nmap_remove(nm_ptr, node);
 
        /* Group bearers by priority (can assume max of two per priority) */
        memset(bp_temp, 0, sizeof(bcbearer->bpairs_temp));
 
+       rcu_read_lock();
        for (b_index = 0; b_index < MAX_BEARERS; b_index++) {
-               struct tipc_bearer *b = bearer_list[b_index];
+               b = rcu_dereference_rtnl(bearer_list[b_index]);
                if (!b || !b->nodes.count)
                        continue;
 
@@ -677,6 +707,7 @@ void tipc_bcbearer_sort(void)
                else
                        bp_temp[b->priority].secondary = b;
        }
+       rcu_read_unlock();
 
        /* Create array of bearer pairs for broadcasting */
        bp_curr = bcbearer->bpairs;
@@ -702,7 +733,7 @@ void tipc_bcbearer_sort(void)
                bp_curr++;
        }
 
-       spin_unlock_bh(&bc_lock);
+       tipc_bclink_unlock();
 }
 
 
@@ -714,7 +745,7 @@ int tipc_bclink_stats(char *buf, const u32 buf_size)
        if (!bcl)
                return 0;
 
-       spin_lock_bh(&bc_lock);
+       tipc_bclink_lock();
 
        s = &bcl->stats;
 
@@ -743,7 +774,7 @@ int tipc_bclink_stats(char *buf, const u32 buf_size)
                             s->queue_sz_counts ?
                             (s->accu_queue_sz / s->queue_sz_counts) : 0);
 
-       spin_unlock_bh(&bc_lock);
+       tipc_bclink_unlock();
        return ret;
 }
 
@@ -752,9 +783,9 @@ int tipc_bclink_reset_stats(void)
        if (!bcl)
                return -ENOPROTOOPT;
 
-       spin_lock_bh(&bc_lock);
+       tipc_bclink_lock();
        memset(&bcl->stats, 0, sizeof(bcl->stats));
-       spin_unlock_bh(&bc_lock);
+       tipc_bclink_unlock();
        return 0;
 }
 
@@ -765,46 +796,59 @@ int tipc_bclink_set_queue_limits(u32 limit)
        if ((limit < TIPC_MIN_LINK_WIN) || (limit > TIPC_MAX_LINK_WIN))
                return -EINVAL;
 
-       spin_lock_bh(&bc_lock);
+       tipc_bclink_lock();
        tipc_link_set_queue_limits(bcl, limit);
-       spin_unlock_bh(&bc_lock);
+       tipc_bclink_unlock();
        return 0;
 }
 
-void tipc_bclink_init(void)
+int tipc_bclink_init(void)
 {
+       bcbearer = kzalloc(sizeof(*bcbearer), GFP_ATOMIC);
+       if (!bcbearer)
+               return -ENOMEM;
+
+       bclink = kzalloc(sizeof(*bclink), GFP_ATOMIC);
+       if (!bclink) {
+               kfree(bcbearer);
+               return -ENOMEM;
+       }
+
+       bcl = &bclink->link;
        bcbearer->bearer.media = &bcbearer->media;
        bcbearer->media.send_msg = tipc_bcbearer_send;
        sprintf(bcbearer->media.name, "tipc-broadcast");
 
+       spin_lock_init(&bclink->lock);
        INIT_LIST_HEAD(&bcl->waiting_ports);
        bcl->next_out_no = 1;
        spin_lock_init(&bclink->node.lock);
        bcl->owner = &bclink->node;
        bcl->max_pkt = MAX_PKT_DEFAULT_MCAST;
        tipc_link_set_queue_limits(bcl, BCLINK_WIN_DEFAULT);
-       bcl->b_ptr = &bcbearer->bearer;
-       bearer_list[BCBEARER] = &bcbearer->bearer;
+       bcl->bearer_id = MAX_BEARERS;
+       rcu_assign_pointer(bearer_list[MAX_BEARERS], &bcbearer->bearer);
        bcl->state = WORKING_WORKING;
        strlcpy(bcl->name, tipc_bclink_name, TIPC_MAX_LINK_NAME);
+       return 0;
 }
 
 void tipc_bclink_stop(void)
 {
-       spin_lock_bh(&bc_lock);
+       tipc_bclink_lock();
        tipc_link_purge_queues(bcl);
-       spin_unlock_bh(&bc_lock);
+       tipc_bclink_unlock();
 
-       bearer_list[BCBEARER] = NULL;
-       memset(bclink, 0, sizeof(*bclink));
-       memset(bcbearer, 0, sizeof(*bcbearer));
+       RCU_INIT_POINTER(bearer_list[BCBEARER], NULL);
+       synchronize_net();
+       kfree(bcbearer);
+       kfree(bclink);
 }
 
-
 /**
  * tipc_nmap_add - add a node to a node map
  */
-void tipc_nmap_add(struct tipc_node_map *nm_ptr, u32 node)
+static void tipc_nmap_add(struct tipc_node_map *nm_ptr, u32 node)
 {
        int n = tipc_node(node);
        int w = n / WSIZE;
@@ -819,7 +863,7 @@ void tipc_nmap_add(struct tipc_node_map *nm_ptr, u32 node)
 /**
  * tipc_nmap_remove - remove a node from a node map
  */
-void tipc_nmap_remove(struct tipc_node_map *nm_ptr, u32 node)
+static void tipc_nmap_remove(struct tipc_node_map *nm_ptr, u32 node)
 {
        int n = tipc_node(node);
        int w = n / WSIZE;
index a80ef54b818e221a98bd9bd69a3adf28d0001f88..00330c45df3e04d03626a31d5f2ee6ba66569298 100644 (file)
@@ -39,6 +39,7 @@
 
 #define MAX_NODES 4096
 #define WSIZE 32
+#define TIPC_BCLINK_RESET 1
 
 /**
  * struct tipc_node_map - set of node identifiers
@@ -69,9 +70,6 @@ struct tipc_node;
 
 extern const char tipc_bclink_name[];
 
-void tipc_nmap_add(struct tipc_node_map *nm_ptr, u32 node);
-void tipc_nmap_remove(struct tipc_node_map *nm_ptr, u32 node);
-
 /**
  * tipc_nmap_equal - test for equality of node maps
  */
@@ -84,8 +82,9 @@ static inline int tipc_nmap_equal(struct tipc_node_map *nm_a,
 void tipc_port_list_add(struct tipc_port_list *pl_ptr, u32 port);
 void tipc_port_list_free(struct tipc_port_list *pl_ptr);
 
-void tipc_bclink_init(void);
+int tipc_bclink_init(void);
 void tipc_bclink_stop(void);
+void tipc_bclink_set_flags(unsigned int flags);
 void tipc_bclink_add_node(u32 addr);
 void tipc_bclink_remove_node(u32 addr);
 struct tipc_node *tipc_bclink_retransmit_to(void);
@@ -98,6 +97,6 @@ void tipc_bclink_update_link_state(struct tipc_node *n_ptr, u32 last_sent);
 int  tipc_bclink_stats(char *stats_buf, const u32 buf_size);
 int  tipc_bclink_reset_stats(void);
 int  tipc_bclink_set_queue_limits(u32 limit);
-void tipc_bcbearer_sort(void);
+void tipc_bcbearer_sort(struct tipc_node_map *nm_ptr, u32 node, bool action);
 
 #endif
index 3fef7eb776dc12934654b2bfa7500cfa75138ad4..264474394f9f75994205b751e734b632d21fbc02 100644 (file)
@@ -49,7 +49,7 @@ static struct tipc_media * const media_info_array[] = {
        NULL
 };
 
-struct tipc_bearer *bearer_list[MAX_BEARERS + 1];
+struct tipc_bearer __rcu *bearer_list[MAX_BEARERS + 1];
 
 static void bearer_disable(struct tipc_bearer *b_ptr, bool shutting_down);
 
@@ -178,7 +178,7 @@ struct tipc_bearer *tipc_bearer_find(const char *name)
        u32 i;
 
        for (i = 0; i < MAX_BEARERS; i++) {
-               b_ptr = bearer_list[i];
+               b_ptr = rtnl_dereference(bearer_list[i]);
                if (b_ptr && (!strcmp(b_ptr->name, name)))
                        return b_ptr;
        }
@@ -198,10 +198,9 @@ struct sk_buff *tipc_bearer_get_names(void)
        if (!buf)
                return NULL;
 
-       read_lock_bh(&tipc_net_lock);
        for (i = 0; media_info_array[i] != NULL; i++) {
                for (j = 0; j < MAX_BEARERS; j++) {
-                       b = bearer_list[j];
+                       b = rtnl_dereference(bearer_list[j]);
                        if (!b)
                                continue;
                        if (b->media == media_info_array[i]) {
@@ -211,22 +210,33 @@ struct sk_buff *tipc_bearer_get_names(void)
                        }
                }
        }
-       read_unlock_bh(&tipc_net_lock);
        return buf;
 }
 
-void tipc_bearer_add_dest(struct tipc_bearer *b_ptr, u32 dest)
+void tipc_bearer_add_dest(u32 bearer_id, u32 dest)
 {
-       tipc_nmap_add(&b_ptr->nodes, dest);
-       tipc_bcbearer_sort();
-       tipc_disc_add_dest(b_ptr->link_req);
+       struct tipc_bearer *b_ptr;
+
+       rcu_read_lock();
+       b_ptr = rcu_dereference_rtnl(bearer_list[bearer_id]);
+       if (b_ptr) {
+               tipc_bcbearer_sort(&b_ptr->nodes, dest, true);
+               tipc_disc_add_dest(b_ptr->link_req);
+       }
+       rcu_read_unlock();
 }
 
-void tipc_bearer_remove_dest(struct tipc_bearer *b_ptr, u32 dest)
+void tipc_bearer_remove_dest(u32 bearer_id, u32 dest)
 {
-       tipc_nmap_remove(&b_ptr->nodes, dest);
-       tipc_bcbearer_sort();
-       tipc_disc_remove_dest(b_ptr->link_req);
+       struct tipc_bearer *b_ptr;
+
+       rcu_read_lock();
+       b_ptr = rcu_dereference_rtnl(bearer_list[bearer_id]);
+       if (b_ptr) {
+               tipc_bcbearer_sort(&b_ptr->nodes, dest, false);
+               tipc_disc_remove_dest(b_ptr->link_req);
+       }
+       rcu_read_unlock();
 }
 
 /**
@@ -271,13 +281,11 @@ int tipc_enable_bearer(const char *name, u32 disc_domain, u32 priority)
                return -EINVAL;
        }
 
-       write_lock_bh(&tipc_net_lock);
-
        m_ptr = tipc_media_find(b_names.media_name);
        if (!m_ptr) {
                pr_warn("Bearer <%s> rejected, media <%s> not registered\n",
                        name, b_names.media_name);
-               goto exit;
+               return -EINVAL;
        }
 
        if (priority == TIPC_MEDIA_LINK_PRI)
@@ -287,7 +295,7 @@ restart:
        bearer_id = MAX_BEARERS;
        with_this_prio = 1;
        for (i = MAX_BEARERS; i-- != 0; ) {
-               b_ptr = bearer_list[i];
+               b_ptr = rtnl_dereference(bearer_list[i]);
                if (!b_ptr) {
                        bearer_id = i;
                        continue;
@@ -295,14 +303,14 @@ restart:
                if (!strcmp(name, b_ptr->name)) {
                        pr_warn("Bearer <%s> rejected, already enabled\n",
                                name);
-                       goto exit;
+                       return -EINVAL;
                }
                if ((b_ptr->priority == priority) &&
                    (++with_this_prio > 2)) {
                        if (priority-- == 0) {
                                pr_warn("Bearer <%s> rejected, duplicate priority\n",
                                        name);
-                               goto exit;
+                               return -EINVAL;
                        }
                        pr_warn("Bearer <%s> priority adjustment required %u->%u\n",
                                name, priority + 1, priority);
@@ -312,21 +320,20 @@ restart:
        if (bearer_id >= MAX_BEARERS) {
                pr_warn("Bearer <%s> rejected, bearer limit reached (%u)\n",
                        name, MAX_BEARERS);
-               goto exit;
+               return -EINVAL;
        }
 
        b_ptr = kzalloc(sizeof(*b_ptr), GFP_ATOMIC);
-       if (!b_ptr) {
-               res = -ENOMEM;
-               goto exit;
-       }
+       if (!b_ptr)
+               return -ENOMEM;
+
        strcpy(b_ptr->name, name);
        b_ptr->media = m_ptr;
        res = m_ptr->enable_media(b_ptr);
        if (res) {
                pr_warn("Bearer <%s> rejected, enable failure (%d)\n",
                        name, -res);
-               goto exit;
+               return -EINVAL;
        }
 
        b_ptr->identity = bearer_id;
@@ -341,16 +348,14 @@ restart:
                bearer_disable(b_ptr, false);
                pr_warn("Bearer <%s> rejected, discovery object creation failed\n",
                        name);
-               goto exit;
+               return -EINVAL;
        }
 
-       bearer_list[bearer_id] = b_ptr;
+       rcu_assign_pointer(bearer_list[bearer_id], b_ptr);
 
        pr_info("Enabled bearer <%s>, discovery domain %s, priority %u\n",
                name,
                tipc_addr_string_fill(addr_string, disc_domain), priority);
-exit:
-       write_unlock_bh(&tipc_net_lock);
        return res;
 }
 
@@ -359,19 +364,16 @@ exit:
  */
 static int tipc_reset_bearer(struct tipc_bearer *b_ptr)
 {
-       read_lock_bh(&tipc_net_lock);
        pr_info("Resetting bearer <%s>\n", b_ptr->name);
-       tipc_disc_delete(b_ptr->link_req);
        tipc_link_reset_list(b_ptr->identity);
-       tipc_disc_create(b_ptr, &b_ptr->bcast_addr);
-       read_unlock_bh(&tipc_net_lock);
+       tipc_disc_reset(b_ptr);
        return 0;
 }
 
 /**
  * bearer_disable
  *
- * Note: This routine assumes caller holds tipc_net_lock.
+ * Note: This routine assumes caller holds RTNL lock.
  */
 static void bearer_disable(struct tipc_bearer *b_ptr, bool shutting_down)
 {
@@ -385,12 +387,12 @@ static void bearer_disable(struct tipc_bearer *b_ptr, bool shutting_down)
                tipc_disc_delete(b_ptr->link_req);
 
        for (i = 0; i < MAX_BEARERS; i++) {
-               if (b_ptr == bearer_list[i]) {
-                       bearer_list[i] = NULL;
+               if (b_ptr == rtnl_dereference(bearer_list[i])) {
+                       RCU_INIT_POINTER(bearer_list[i], NULL);
                        break;
                }
        }
-       kfree(b_ptr);
+       kfree_rcu(b_ptr, rcu);
 }
 
 int tipc_disable_bearer(const char *name)
@@ -398,7 +400,6 @@ int tipc_disable_bearer(const char *name)
        struct tipc_bearer *b_ptr;
        int res;
 
-       write_lock_bh(&tipc_net_lock);
        b_ptr = tipc_bearer_find(name);
        if (b_ptr == NULL) {
                pr_warn("Attempt to disable unknown bearer <%s>\n", name);
@@ -407,32 +408,9 @@ int tipc_disable_bearer(const char *name)
                bearer_disable(b_ptr, false);
                res = 0;
        }
-       write_unlock_bh(&tipc_net_lock);
        return res;
 }
 
-
-/* tipc_l2_media_addr_set - initialize Ethernet media address structure
- *
- * Media-dependent "value" field stores MAC address in first 6 bytes
- * and zeroes out the remaining bytes.
- */
-void tipc_l2_media_addr_set(const struct tipc_bearer *b,
-                           struct tipc_media_addr *a, char *mac)
-{
-       int len = b->media->hwaddr_len;
-
-       if (unlikely(sizeof(a->value) < len)) {
-               WARN_ONCE(1, "Media length invalid\n");
-               return;
-       }
-
-       memcpy(a->value, mac, len);
-       memset(a->value + len, 0, sizeof(a->value) - len);
-       a->media_id = b->media->type_id;
-       a->broadcast = !memcmp(mac, b->bcast_addr.value, len);
-}
-
 int tipc_enable_l2_media(struct tipc_bearer *b)
 {
        struct net_device *dev;
@@ -443,33 +421,37 @@ int tipc_enable_l2_media(struct tipc_bearer *b)
        if (!dev)
                return -ENODEV;
 
-       /* Associate TIPC bearer with Ethernet bearer */
-       b->media_ptr = dev;
-       memset(b->bcast_addr.value, 0, sizeof(b->bcast_addr.value));
+       /* Associate TIPC bearer with L2 bearer */
+       rcu_assign_pointer(b->media_ptr, dev);
+       memset(&b->bcast_addr, 0, sizeof(b->bcast_addr));
        memcpy(b->bcast_addr.value, dev->broadcast, b->media->hwaddr_len);
        b->bcast_addr.media_id = b->media->type_id;
        b->bcast_addr.broadcast = 1;
        b->mtu = dev->mtu;
-       tipc_l2_media_addr_set(b, &b->addr, (char *)dev->dev_addr);
+       b->media->raw2addr(b, &b->addr, (char *)dev->dev_addr);
        rcu_assign_pointer(dev->tipc_ptr, b);
        return 0;
 }
 
-/* tipc_disable_l2_media - detach TIPC bearer from an Ethernet interface
+/* tipc_disable_l2_media - detach TIPC bearer from an L2 interface
  *
- * Mark Ethernet bearer as inactive so that incoming buffers are thrown away,
+ * Mark L2 bearer as inactive so that incoming buffers are thrown away,
  * then get worker thread to complete bearer cleanup.  (Can't do cleanup
  * here because cleanup code needs to sleep and caller holds spinlocks.)
  */
 void tipc_disable_l2_media(struct tipc_bearer *b)
 {
-       struct net_device *dev = (struct net_device *)b->media_ptr;
+       struct net_device *dev;
+
+       dev = (struct net_device *)rtnl_dereference(b->media_ptr);
+       RCU_INIT_POINTER(b->media_ptr, NULL);
        RCU_INIT_POINTER(dev->tipc_ptr, NULL);
+       synchronize_net();
        dev_put(dev);
 }
 
 /**
- * tipc_l2_send_msg - send a TIPC packet out over an Ethernet interface
+ * tipc_l2_send_msg - send a TIPC packet out over an L2 interface
  * @buf: the packet to be sent
  * @b_ptr: the bearer through which the packet is to be sent
  * @dest: peer destination address
@@ -478,8 +460,12 @@ int tipc_l2_send_msg(struct sk_buff *buf, struct tipc_bearer *b,
                     struct tipc_media_addr *dest)
 {
        struct sk_buff *clone;
+       struct net_device *dev;
        int delta;
-       struct net_device *dev = (struct net_device *)b->media_ptr;
+
+       dev = (struct net_device *)rcu_dereference_rtnl(b->media_ptr);
+       if (!dev)
+               return 0;
 
        clone = skb_clone(buf, GFP_ATOMIC);
        if (!clone)
@@ -507,10 +493,16 @@ int tipc_l2_send_msg(struct sk_buff *buf, struct tipc_bearer *b,
  * The media send routine must not alter the buffer being passed in
  * as it may be needed for later retransmission!
  */
-void tipc_bearer_send(struct tipc_bearer *b, struct sk_buff *buf,
+void tipc_bearer_send(u32 bearer_id, struct sk_buff *buf,
                      struct tipc_media_addr *dest)
 {
-       b->media->send_msg(buf, b, dest);
+       struct tipc_bearer *b_ptr;
+
+       rcu_read_lock();
+       b_ptr = rcu_dereference_rtnl(bearer_list[bearer_id]);
+       if (likely(b_ptr))
+               b_ptr->media->send_msg(buf, b_ptr, dest);
+       rcu_read_unlock();
 }
 
 /**
@@ -535,7 +527,7 @@ static int tipc_l2_rcv_msg(struct sk_buff *buf, struct net_device *dev,
        }
 
        rcu_read_lock();
-       b_ptr = rcu_dereference(dev->tipc_ptr);
+       b_ptr = rcu_dereference_rtnl(dev->tipc_ptr);
        if (likely(b_ptr)) {
                if (likely(buf->pkt_type <= PACKET_BROADCAST)) {
                        buf->next = NULL;
@@ -568,12 +560,9 @@ static int tipc_l2_device_event(struct notifier_block *nb, unsigned long evt,
        if (!net_eq(dev_net(dev), &init_net))
                return NOTIFY_DONE;
 
-       rcu_read_lock();
-       b_ptr = rcu_dereference(dev->tipc_ptr);
-       if (!b_ptr) {
-               rcu_read_unlock();
+       b_ptr = rtnl_dereference(dev->tipc_ptr);
+       if (!b_ptr)
                return NOTIFY_DONE;
-       }
 
        b_ptr->mtu = dev->mtu;
 
@@ -586,17 +575,15 @@ static int tipc_l2_device_event(struct notifier_block *nb, unsigned long evt,
                tipc_reset_bearer(b_ptr);
                break;
        case NETDEV_CHANGEADDR:
-               tipc_l2_media_addr_set(b_ptr, &b_ptr->addr,
+               b_ptr->media->raw2addr(b_ptr, &b_ptr->addr,
                                       (char *)dev->dev_addr);
                tipc_reset_bearer(b_ptr);
                break;
        case NETDEV_UNREGISTER:
        case NETDEV_CHANGENAME:
-               tipc_disable_bearer(b_ptr->name);
+               bearer_disable(b_ptr, false);
                break;
        }
-       rcu_read_unlock();
-
        return NOTIFY_OK;
 }
 
@@ -633,7 +620,7 @@ void tipc_bearer_stop(void)
        u32 i;
 
        for (i = 0; i < MAX_BEARERS; i++) {
-               b_ptr = bearer_list[i];
+               b_ptr = rtnl_dereference(bearer_list[i]);
                if (b_ptr) {
                        bearer_disable(b_ptr, true);
                        bearer_list[i] = NULL;
index ba48145e871dd8dcd357a193b61e9234e5ad7f0d..78fccc49de23c1e6dc20c1799d629c4083f6f80c 100644 (file)
 #define MAX_BEARERS    2
 #define MAX_MEDIA      2
 
-/*
- * Identifiers associated with TIPC message header media address info
- *
- * - address info field is 20 bytes long
- * - media type identifier located at offset 3
- * - remaining bytes vary according to media type
+/* Identifiers associated with TIPC message header media address info
+ * - address info field is 32 bytes long
+ * - the field's actual content and length is defined per media
+ * - remaining unused bytes in the field are set to zero
  */
-#define TIPC_MEDIA_ADDR_SIZE   20
+#define TIPC_MEDIA_ADDR_SIZE   32
 #define TIPC_MEDIA_TYPE_OFFSET 3
 
 /*
@@ -77,9 +75,10 @@ struct tipc_bearer;
  * @send_msg: routine which handles buffer transmission
  * @enable_media: routine which enables a media
  * @disable_media: routine which disables a media
- * @addr2str: routine which converts media address to string
- * @addr2msg: routine which converts media address to protocol message area
- * @msg2addr: routine which converts media address from protocol message area
+ * @addr2str: convert media address format to string
+ * @addr2msg: convert from media addr format to discovery msg addr format
+ * @msg2addr: convert from discovery msg addr format to media addr format
+ * @raw2addr: convert from raw addr format to media addr format
  * @priority: default link (and bearer) priority
  * @tolerance: default time (in ms) before declaring link failure
  * @window: default window (in packets) before declaring link congestion
@@ -93,10 +92,16 @@ struct tipc_media {
                        struct tipc_media_addr *dest);
        int (*enable_media)(struct tipc_bearer *b_ptr);
        void (*disable_media)(struct tipc_bearer *b_ptr);
-       int (*addr2str)(struct tipc_media_addr *a, char *str_buf, int str_size);
-       int (*addr2msg)(struct tipc_media_addr *a, char *msg_area);
-       int (*msg2addr)(const struct tipc_bearer *b_ptr,
-                       struct tipc_media_addr *a, char *msg_area);
+       int (*addr2str)(struct tipc_media_addr *addr,
+                       char *strbuf,
+                       int bufsz);
+       int (*addr2msg)(char *msg, struct tipc_media_addr *addr);
+       int (*msg2addr)(struct tipc_bearer *b,
+                       struct tipc_media_addr *addr,
+                       char *msg);
+       int (*raw2addr)(struct tipc_bearer *b,
+                       struct tipc_media_addr *addr,
+                       char *raw);
        u32 priority;
        u32 tolerance;
        u32 window;
@@ -113,6 +118,7 @@ struct tipc_media {
  * @name: bearer name (format = media:interface)
  * @media: ptr to media structure associated with bearer
  * @bcast_addr: media address used in broadcasting
+ * @rcu: rcu struct for tipc_bearer
  * @priority: default link priority for bearer
  * @window: default window size for bearer
  * @tolerance: default link tolerance for bearer
@@ -127,12 +133,13 @@ struct tipc_media {
  * care of initializing all other fields.
  */
 struct tipc_bearer {
-       void *media_ptr;                        /* initalized by media */
+       void __rcu *media_ptr;                  /* initalized by media */
        u32 mtu;                                /* initalized by media */
        struct tipc_media_addr addr;            /* initalized by media */
        char name[TIPC_MAX_BEARER_NAME];
        struct tipc_media *media;
        struct tipc_media_addr bcast_addr;
+       struct rcu_head rcu;
        u32 priority;
        u32 window;
        u32 tolerance;
@@ -150,7 +157,7 @@ struct tipc_bearer_names {
 
 struct tipc_link;
 
-extern struct tipc_bearer *bearer_list[];
+extern struct tipc_bearer __rcu *bearer_list[];
 
 /*
  * TIPC routines available to supported media types
@@ -173,22 +180,20 @@ int tipc_media_set_priority(const char *name, u32 new_value);
 int tipc_media_set_window(const char *name, u32 new_value);
 void tipc_media_addr_printf(char *buf, int len, struct tipc_media_addr *a);
 struct sk_buff *tipc_media_get_names(void);
-void tipc_l2_media_addr_set(const struct tipc_bearer *b,
-                           struct tipc_media_addr *a, char *mac);
 int tipc_enable_l2_media(struct tipc_bearer *b);
 void tipc_disable_l2_media(struct tipc_bearer *b);
 int tipc_l2_send_msg(struct sk_buff *buf, struct tipc_bearer *b,
                     struct tipc_media_addr *dest);
 
 struct sk_buff *tipc_bearer_get_names(void);
-void tipc_bearer_add_dest(struct tipc_bearer *b_ptr, u32 dest);
-void tipc_bearer_remove_dest(struct tipc_bearer *b_ptr, u32 dest);
+void tipc_bearer_add_dest(u32 bearer_id, u32 dest);
+void tipc_bearer_remove_dest(u32 bearer_id, u32 dest);
 struct tipc_bearer *tipc_bearer_find(const char *name);
 struct tipc_media *tipc_media_find(const char *name);
 int tipc_bearer_setup(void);
 void tipc_bearer_cleanup(void);
 void tipc_bearer_stop(void);
-void tipc_bearer_send(struct tipc_bearer *b, struct sk_buff *buf,
+void tipc_bearer_send(u32 bearer_id, struct sk_buff *buf,
                      struct tipc_media_addr *dest);
 
 #endif /* _TIPC_BEARER_H */
index 4b981c053823e90cc31963277aedd8c3682bc1a0..2b42403ad33a690221456ff25fb4be50a2235255 100644 (file)
@@ -42,8 +42,6 @@
 
 #define REPLY_TRUNCATED "<truncated>\n"
 
-static DEFINE_MUTEX(config_mutex);
-
 static const void *req_tlv_area;       /* request message TLV area */
 static int req_tlv_space;              /* request message TLV area size */
 static int rep_headroom;               /* reply message headroom to use */
@@ -179,8 +177,10 @@ static struct sk_buff *cfg_set_own_addr(void)
        if (tipc_own_addr)
                return tipc_cfg_reply_error_string(TIPC_CFG_NOT_SUPPORTED
                                                   " (cannot change node address once assigned)");
-       tipc_net_start(addr);
-       return tipc_cfg_reply_none();
+       if (!tipc_net_start(addr))
+               return tipc_cfg_reply_none();
+
+       return tipc_cfg_reply_error_string("cannot change to network mode");
 }
 
 static struct sk_buff *cfg_set_max_ports(void)
@@ -223,7 +223,7 @@ struct sk_buff *tipc_cfg_do_cmd(u32 orig_node, u16 cmd, const void *request_area
 {
        struct sk_buff *rep_tlv_buf;
 
-       mutex_lock(&config_mutex);
+       rtnl_lock();
 
        /* Save request and reply details in a well-known location */
        req_tlv_area = request_area;
@@ -337,6 +337,6 @@ struct sk_buff *tipc_cfg_do_cmd(u32 orig_node, u16 cmd, const void *request_area
 
        /* Return reply buffer */
 exit:
-       mutex_unlock(&config_mutex);
+       rtnl_unlock();
        return rep_tlv_buf;
 }
index 50d57429ebcaf82b8d36bcf49f6fa1585664180a..676d18015dd82efa0346f6bed2bf0d7f5489f1f6 100644 (file)
@@ -80,7 +80,6 @@ struct sk_buff *tipc_buf_acquire(u32 size)
  */
 static void tipc_core_stop(void)
 {
-       tipc_handler_stop();
        tipc_net_stop();
        tipc_bearer_cleanup();
        tipc_netlink_stop();
@@ -100,10 +99,6 @@ static int tipc_core_start(void)
 
        get_random_bytes(&tipc_random, sizeof(tipc_random));
 
-       err = tipc_handler_start();
-       if (err)
-               goto out_handler;
-
        err = tipc_ref_table_init(tipc_max_ports, tipc_random);
        if (err)
                goto out_reftbl;
@@ -146,8 +141,6 @@ out_netlink:
 out_nametbl:
        tipc_ref_table_stop();
 out_reftbl:
-       tipc_handler_stop();
-out_handler:
        return err;
 }
 
@@ -161,10 +154,11 @@ static int __init tipc_init(void)
        tipc_max_ports = CONFIG_TIPC_PORTS;
        tipc_net_id = 4711;
 
-       sysctl_tipc_rmem[0] = CONN_OVERLOAD_LIMIT >> 4 << TIPC_LOW_IMPORTANCE;
-       sysctl_tipc_rmem[1] = CONN_OVERLOAD_LIMIT >> 4 <<
+       sysctl_tipc_rmem[0] = TIPC_CONN_OVERLOAD_LIMIT >> 4 <<
+                             TIPC_LOW_IMPORTANCE;
+       sysctl_tipc_rmem[1] = TIPC_CONN_OVERLOAD_LIMIT >> 4 <<
                              TIPC_CRITICAL_IMPORTANCE;
-       sysctl_tipc_rmem[2] = CONN_OVERLOAD_LIMIT;
+       sysctl_tipc_rmem[2] = TIPC_CONN_OVERLOAD_LIMIT;
 
        res = tipc_core_start();
        if (res)
index 8985bbcb942bdb3d6ef839c3249d4e547c2f75ce..bb26ed1ee966c84c66fc7322877d80f763047e5a 100644 (file)
@@ -56,7 +56,8 @@
 #include <linux/list.h>
 #include <linux/slab.h>
 #include <linux/vmalloc.h>
-
+#include <linux/rtnetlink.h>
+#include <linux/etherdevice.h>
 
 #define TIPC_MOD_VER "2.0.0"
 
@@ -89,8 +90,6 @@ extern int tipc_random __read_mostly;
 /*
  * Routines available to privileged subsystems
  */
-int tipc_handler_start(void);
-void tipc_handler_stop(void);
 int tipc_netlink_start(void);
 void tipc_netlink_stop(void);
 int tipc_socket_init(void);
@@ -109,12 +108,10 @@ void tipc_unregister_sysctl(void);
 #endif
 
 /*
- * TIPC timer and signal code
+ * TIPC timer code
  */
 typedef void (*Handler) (unsigned long);
 
-u32 tipc_k_signal(Handler routine, unsigned long argument);
-
 /**
  * k_init_timer - initialize a timer
  * @timer: pointer to timer structure
@@ -191,6 +188,7 @@ static inline void k_term_timer(struct timer_list *timer)
 struct tipc_skb_cb {
        void *handle;
        bool deferred;
+       struct sk_buff *tail;
 };
 
 #define TIPC_SKB_CB(__skb) ((struct tipc_skb_cb *)&((__skb)->cb[0]))
index 542fe3413dc4e8d06d97eb2d5a409d2a7b26459a..aa722a42ef8b03b4d840e31bbf7582d51948fbae 100644 (file)
@@ -1,7 +1,7 @@
 /*
  * net/tipc/discover.c
  *
- * Copyright (c) 2003-2006, Ericsson AB
+ * Copyright (c) 2003-2006, 2014, Ericsson AB
  * Copyright (c) 2005-2006, 2010-2011, Wind River Systems
  * All rights reserved.
  *
@@ -46,8 +46,9 @@
 
 /**
  * struct tipc_link_req - information about an ongoing link setup request
- * @bearer: bearer issuing requests
+ * @bearer_id: identity of bearer issuing requests
  * @dest: destination address for request messages
+ * @domain: network domain to which links can be established
  * @num_nodes: number of nodes currently discovered (i.e. with an active link)
  * @lock: spinlock for controlling access to requests
  * @buf: request message to be (repeatedly) sent
@@ -55,8 +56,9 @@
  * @timer_intv: current interval between requests (in ms)
  */
 struct tipc_link_req {
-       struct tipc_bearer *bearer;
+       u32 bearer_id;
        struct tipc_media_addr dest;
+       u32 domain;
        int num_nodes;
        spinlock_t lock;
        struct sk_buff *buf;
@@ -69,22 +71,19 @@ struct tipc_link_req {
  * @type: message type (request or response)
  * @b_ptr: ptr to bearer issuing message
  */
-static struct sk_buff *tipc_disc_init_msg(u32 type, struct tipc_bearer *b_ptr)
+static void tipc_disc_init_msg(struct sk_buff *buf, u32 type,
+                              struct tipc_bearer *b_ptr)
 {
-       struct sk_buff *buf = tipc_buf_acquire(INT_H_SIZE);
        struct tipc_msg *msg;
        u32 dest_domain = b_ptr->domain;
 
-       if (buf) {
-               msg = buf_msg(buf);
-               tipc_msg_init(msg, LINK_CONFIG, type, INT_H_SIZE, dest_domain);
-               msg_set_non_seq(msg, 1);
-               msg_set_node_sig(msg, tipc_random);
-               msg_set_dest_domain(msg, dest_domain);
-               msg_set_bc_netid(msg, tipc_net_id);
-               b_ptr->media->addr2msg(&b_ptr->addr, msg_media_addr(msg));
-       }
-       return buf;
+       msg = buf_msg(buf);
+       tipc_msg_init(msg, LINK_CONFIG, type, INT_H_SIZE, dest_domain);
+       msg_set_non_seq(msg, 1);
+       msg_set_node_sig(msg, tipc_random);
+       msg_set_dest_domain(msg, dest_domain);
+       msg_set_bc_netid(msg, tipc_net_id);
+       b_ptr->media->addr2msg(msg_media_addr(msg), &b_ptr->addr);
 }
 
 /**
@@ -107,146 +106,150 @@ static void disc_dupl_alert(struct tipc_bearer *b_ptr, u32 node_addr,
 }
 
 /**
- * tipc_disc_rcv - handle incoming link setup message (request or response)
+ * tipc_disc_rcv - handle incoming discovery message (request or response)
  * @buf: buffer containing message
- * @b_ptr: bearer that message arrived on
+ * @bearer: bearer that message arrived on
  */
-void tipc_disc_rcv(struct sk_buff *buf, struct tipc_bearer *b_ptr)
+void tipc_disc_rcv(struct sk_buff *buf, struct tipc_bearer *bearer)
 {
-       struct tipc_node *n_ptr;
+       struct tipc_node *node;
        struct tipc_link *link;
-       struct tipc_media_addr media_addr;
+       struct tipc_media_addr maddr;
        struct sk_buff *rbuf;
        struct tipc_msg *msg = buf_msg(buf);
-       u32 dest = msg_dest_domain(msg);
-       u32 orig = msg_prevnode(msg);
+       u32 ddom = msg_dest_domain(msg);
+       u32 onode = msg_prevnode(msg);
        u32 net_id = msg_bc_netid(msg);
-       u32 type = msg_type(msg);
+       u32 mtyp = msg_type(msg);
        u32 signature = msg_node_sig(msg);
-       int addr_mismatch;
-       int link_fully_up;
-
-       media_addr.broadcast = 1;
-       b_ptr->media->msg2addr(b_ptr, &media_addr, msg_media_addr(msg));
+       bool addr_match = false;
+       bool sign_match = false;
+       bool link_up = false;
+       bool accept_addr = false;
+       bool accept_sign = false;
+       bool respond = false;
+
+       bearer->media->msg2addr(bearer, &maddr, msg_media_addr(msg));
        kfree_skb(buf);
 
        /* Ensure message from node is valid and communication is permitted */
        if (net_id != tipc_net_id)
                return;
-       if (media_addr.broadcast)
+       if (maddr.broadcast)
                return;
-       if (!tipc_addr_domain_valid(dest))
+       if (!tipc_addr_domain_valid(ddom))
                return;
-       if (!tipc_addr_node_valid(orig))
+       if (!tipc_addr_node_valid(onode))
                return;
-       if (orig == tipc_own_addr) {
-               if (memcmp(&media_addr, &b_ptr->addr, sizeof(media_addr)))
-                       disc_dupl_alert(b_ptr, tipc_own_addr, &media_addr);
+
+       if (in_own_node(onode)) {
+               if (memcmp(&maddr, &bearer->addr, sizeof(maddr)))
+                       disc_dupl_alert(bearer, tipc_own_addr, &maddr);
                return;
        }
-       if (!tipc_in_scope(dest, tipc_own_addr))
+       if (!tipc_in_scope(ddom, tipc_own_addr))
                return;
-       if (!tipc_in_scope(b_ptr->domain, orig))
+       if (!tipc_in_scope(bearer->domain, onode))
                return;
 
-       /* Locate structure corresponding to requesting node */
-       n_ptr = tipc_node_find(orig);
-       if (!n_ptr) {
-               n_ptr = tipc_node_create(orig);
-               if (!n_ptr)
-                       return;
-       }
-       tipc_node_lock(n_ptr);
+       /* Locate, or if necessary, create, node: */
+       node = tipc_node_find(onode);
+       if (!node)
+               node = tipc_node_create(onode);
+       if (!node)
+               return;
 
-       /* Prepare to validate requesting node's signature and media address */
-       link = n_ptr->links[b_ptr->identity];
-       addr_mismatch = (link != NULL) &&
-               memcmp(&link->media_addr, &media_addr, sizeof(media_addr));
+       tipc_node_lock(node);
+       link = node->links[bearer->identity];
 
-       /*
-        * Ensure discovery message's signature is correct
-        *
-        * If signature is incorrect and there is no working link to the node,
-        * accept the new signature but invalidate all existing links to the
-        * node so they won't re-activate without a new discovery message.
-        *
-        * If signature is incorrect and the requested link to the node is
-        * working, accept the new signature. (This is an instance of delayed
-        * rediscovery, where a link endpoint was able to re-establish contact
-        * with its peer endpoint on a node that rebooted before receiving a
-        * discovery message from that node.)
-        *
-        * If signature is incorrect and there is a working link to the node
-        * that is not the requested link, reject the request (must be from
-        * a duplicate node).
-        */
-       if (signature != n_ptr->signature) {
-               if (n_ptr->working_links == 0) {
-                       struct tipc_link *curr_link;
-                       int i;
-
-                       for (i = 0; i < MAX_BEARERS; i++) {
-                               curr_link = n_ptr->links[i];
-                               if (curr_link) {
-                                       memset(&curr_link->media_addr, 0,
-                                              sizeof(media_addr));
-                                       tipc_link_reset(curr_link);
-                               }
-                       }
-                       addr_mismatch = (link != NULL);
-               } else if (tipc_link_is_up(link) && !addr_mismatch) {
-                       /* delayed rediscovery */
-               } else {
-                       disc_dupl_alert(b_ptr, orig, &media_addr);
-                       tipc_node_unlock(n_ptr);
-                       return;
-               }
-               n_ptr->signature = signature;
+       /* Prepare to validate requesting node's signature and media address */
+       sign_match = (signature == node->signature);
+       addr_match = link && !memcmp(&link->media_addr, &maddr, sizeof(maddr));
+       link_up = link && tipc_link_is_up(link);
+
+
+       /* These three flags give us eight permutations: */
+
+       if (sign_match && addr_match && link_up) {
+               /* All is fine. Do nothing. */
+       } else if (sign_match && addr_match && !link_up) {
+               /* Respond. The link will come up in due time */
+               respond = true;
+       } else if (sign_match && !addr_match && link_up) {
+               /* Peer has changed i/f address without rebooting.
+                * If so, the link will reset soon, and the next
+                * discovery will be accepted. So we can ignore it.
+                * It may also be a cloned or malicious peer having
+                * chosen the same node address and signature as an
+                * existing one.
+                * Ignore requests until the link goes down, if ever.
+                */
+               disc_dupl_alert(bearer, onode, &maddr);
+       } else if (sign_match && !addr_match && !link_up) {
+               /* Peer link has changed i/f address without rebooting.
+                * It may also be a cloned or malicious peer; we can't
+                * distinguish between the two.
+                * The signature is correct, so we must accept.
+                */
+               accept_addr = true;
+               respond = true;
+       } else if (!sign_match && addr_match && link_up) {
+               /* Peer node rebooted. Two possibilities:
+                *  - Delayed re-discovery; this link endpoint has already
+                *    reset and re-established contact with the peer, before
+                *    receiving a discovery message from that node.
+                *    (The peer happened to receive one from this node first).
+                *  - The peer came back so fast that our side has not
+                *    discovered it yet. Probing from this side will soon
+                *    reset the link, since there can be no working link
+                *    endpoint at the peer end, and the link will re-establish.
+                *  Accept the signature, since it comes from a known peer.
+                */
+               accept_sign = true;
+       } else if (!sign_match && addr_match && !link_up) {
+               /*  The peer node has rebooted.
+                *  Accept signature, since it is a known peer.
+                */
+               accept_sign = true;
+               respond = true;
+       } else if (!sign_match && !addr_match && link_up) {
+               /* Peer rebooted with new address, or a new/duplicate peer.
+                * Ignore until the link goes down, if ever.
+                */
+               disc_dupl_alert(bearer, onode, &maddr);
+       } else if (!sign_match && !addr_match && !link_up) {
+               /* Peer rebooted with new address, or it is a new peer.
+                * Accept signature and address.
+                */
+               accept_sign = true;
+               accept_addr = true;
+               respond = true;
        }
 
-       /*
-        * Ensure requesting node's media address is correct
-        *
-        * If media address doesn't match and the link is working, reject the
-        * request (must be from a duplicate node).
-        *
-        * If media address doesn't match and the link is not working, accept
-        * the new media address and reset the link to ensure it starts up
-        * cleanly.
-        */
-       if (addr_mismatch) {
-               if (tipc_link_is_up(link)) {
-                       disc_dupl_alert(b_ptr, orig, &media_addr);
-                       tipc_node_unlock(n_ptr);
-                       return;
-               } else {
-                       memcpy(&link->media_addr, &media_addr,
-                              sizeof(media_addr));
-                       tipc_link_reset(link);
-               }
-       }
+       if (accept_sign)
+               node->signature = signature;
 
-       /* Create a link endpoint for this bearer, if necessary */
-       if (!link) {
-               link = tipc_link_create(n_ptr, b_ptr, &media_addr);
-               if (!link) {
-                       tipc_node_unlock(n_ptr);
-                       return;
+       if (accept_addr) {
+               if (!link)
+                       link = tipc_link_create(node, bearer, &maddr);
+               if (link) {
+                       memcpy(&link->media_addr, &maddr, sizeof(maddr));
+                       tipc_link_reset(link);
+               } else {
+                       respond = false;
                }
        }
 
-       /* Accept discovery message & send response, if necessary */
-       link_fully_up = link_working_working(link);
-
-       if ((type == DSC_REQ_MSG) && !link_fully_up) {
-               rbuf = tipc_disc_init_msg(DSC_RESP_MSG, b_ptr);
+       /* Send response, if necessary */
+       if (respond && (mtyp == DSC_REQ_MSG)) {
+               rbuf = tipc_buf_acquire(INT_H_SIZE);
                if (rbuf) {
-                       tipc_bearer_send(b_ptr, rbuf, &media_addr);
+                       tipc_disc_init_msg(rbuf, DSC_RESP_MSG, bearer);
+                       tipc_bearer_send(bearer->identity, rbuf, &maddr);
                        kfree_skb(rbuf);
                }
        }
-
-       tipc_node_unlock(n_ptr);
+       tipc_node_unlock(node);
 }
 
 /**
@@ -303,7 +306,7 @@ static void disc_timeout(struct tipc_link_req *req)
        spin_lock_bh(&req->lock);
 
        /* Stop searching if only desired node has been found */
-       if (tipc_node(req->bearer->domain) && req->num_nodes) {
+       if (tipc_node(req->domain) && req->num_nodes) {
                req->timer_intv = TIPC_LINK_REQ_INACTIVE;
                goto exit;
        }
@@ -315,7 +318,7 @@ static void disc_timeout(struct tipc_link_req *req)
         * hold at fast polling rate if don't have any associated nodes,
         * otherwise hold at slow polling rate
         */
-       tipc_bearer_send(req->bearer, req->buf, &req->dest);
+       tipc_bearer_send(req->bearer_id, req->buf, &req->dest);
 
 
        req->timer_intv *= 2;
@@ -347,21 +350,23 @@ int tipc_disc_create(struct tipc_bearer *b_ptr, struct tipc_media_addr *dest)
        if (!req)
                return -ENOMEM;
 
-       req->buf = tipc_disc_init_msg(DSC_REQ_MSG, b_ptr);
+       req->buf = tipc_buf_acquire(INT_H_SIZE);
        if (!req->buf) {
                kfree(req);
-               return -ENOMSG;
+               return -ENOMEM;
        }
 
+       tipc_disc_init_msg(req->buf, DSC_REQ_MSG, b_ptr);
        memcpy(&req->dest, dest, sizeof(*dest));
-       req->bearer = b_ptr;
+       req->bearer_id = b_ptr->identity;
+       req->domain = b_ptr->domain;
        req->num_nodes = 0;
        req->timer_intv = TIPC_LINK_REQ_INIT;
        spin_lock_init(&req->lock);
        k_init_timer(&req->timer, (Handler)disc_timeout, (unsigned long)req);
        k_start_timer(&req->timer, req->timer_intv);
        b_ptr->link_req = req;
-       tipc_bearer_send(req->bearer, req->buf, &req->dest);
+       tipc_bearer_send(req->bearer_id, req->buf, &req->dest);
        return 0;
 }
 
@@ -376,3 +381,23 @@ void tipc_disc_delete(struct tipc_link_req *req)
        kfree_skb(req->buf);
        kfree(req);
 }
+
+/**
+ * tipc_disc_reset - reset object to send periodic link setup requests
+ * @b_ptr: ptr to bearer issuing requests; its domain field defines the
+ *         network domain to which links can be established
+ */
+void tipc_disc_reset(struct tipc_bearer *b_ptr)
+{
+       struct tipc_link_req *req = b_ptr->link_req;
+
+       spin_lock_bh(&req->lock);
+       tipc_disc_init_msg(req->buf, DSC_REQ_MSG, b_ptr);
+       req->bearer_id = b_ptr->identity;
+       req->domain = b_ptr->domain;
+       req->num_nodes = 0;
+       req->timer_intv = TIPC_LINK_REQ_INIT;
+       k_start_timer(&req->timer, req->timer_intv);
+       tipc_bearer_send(req->bearer_id, req->buf, &req->dest);
+       spin_unlock_bh(&req->lock);
+}
index 07f34729459dcacb93b71d8a56c69263db5f563b..515b57392f4d881b567d6d29cc7677bfece5e4c7 100644 (file)
@@ -41,6 +41,7 @@ struct tipc_link_req;
 
 int tipc_disc_create(struct tipc_bearer *b_ptr, struct tipc_media_addr *dest);
 void tipc_disc_delete(struct tipc_link_req *req);
+void tipc_disc_reset(struct tipc_bearer *b_ptr);
 void tipc_disc_add_dest(struct tipc_link_req *req);
 void tipc_disc_remove_dest(struct tipc_link_req *req);
 void tipc_disc_rcv(struct sk_buff *buf, struct tipc_bearer *b_ptr);
index 67cf3f935dba0a9e4d0141fc0406a93b1aeb60d6..5e1426f1751f146cf3350983e9d0c04d218da850 100644 (file)
@@ -1,7 +1,7 @@
 /*
  * net/tipc/eth_media.c: Ethernet bearer support for TIPC
  *
- * Copyright (c) 2001-2007, 2013, Ericsson AB
+ * Copyright (c) 2001-2007, 2013-2014, Ericsson AB
  * Copyright (c) 2005-2008, 2011-2013, Wind River Systems
  * All rights reserved.
  *
 #include "core.h"
 #include "bearer.h"
 
-#define ETH_ADDR_OFFSET        4       /* message header offset of MAC address */
+#define ETH_ADDR_OFFSET  4  /* MAC addr position inside address field */
 
-/* convert Ethernet address to string */
-static int tipc_eth_addr2str(struct tipc_media_addr *a, char *str_buf,
-                            int str_size)
+/* Convert Ethernet address (media address format) to string */
+static int tipc_eth_addr2str(struct tipc_media_addr *addr,
+                            char *strbuf, int bufsz)
 {
-       if (str_size < 18)      /* 18 = strlen("aa:bb:cc:dd:ee:ff\0") */
+       if (bufsz < 18) /* 18 = strlen("aa:bb:cc:dd:ee:ff\0") */
                return 1;
 
-       sprintf(str_buf, "%pM", a->value);
+       sprintf(strbuf, "%pM", addr->value);
        return 0;
 }
 
-/* convert Ethernet address format to message header format */
-static int tipc_eth_addr2msg(struct tipc_media_addr *a, char *msg_area)
+/* Convert from media address format to discovery message addr format */
+static int tipc_eth_addr2msg(char *msg, struct tipc_media_addr *addr)
 {
-       memset(msg_area, 0, TIPC_MEDIA_ADDR_SIZE);
-       msg_area[TIPC_MEDIA_TYPE_OFFSET] = TIPC_MEDIA_TYPE_ETH;
-       memcpy(msg_area + ETH_ADDR_OFFSET, a->value, ETH_ALEN);
+       memset(msg, 0, TIPC_MEDIA_ADDR_SIZE);
+       msg[TIPC_MEDIA_TYPE_OFFSET] = TIPC_MEDIA_TYPE_ETH;
+       memcpy(msg + ETH_ADDR_OFFSET, addr->value, ETH_ALEN);
        return 0;
 }
 
-/* convert message header address format to Ethernet format */
-static int tipc_eth_msg2addr(const struct tipc_bearer *tb_ptr,
-                            struct tipc_media_addr *a, char *msg_area)
+/* Convert raw mac address format to media addr format */
+static int tipc_eth_raw2addr(struct tipc_bearer *b,
+                            struct tipc_media_addr *addr,
+                            char *msg)
 {
-       if (msg_area[TIPC_MEDIA_TYPE_OFFSET] != TIPC_MEDIA_TYPE_ETH)
-               return 1;
+       char bcast_mac[ETH_ALEN] = {0xff, 0xff, 0xff, 0xff, 0xff, 0xff};
 
-       tipc_l2_media_addr_set(tb_ptr, a, msg_area + ETH_ADDR_OFFSET);
+       memset(addr, 0, sizeof(*addr));
+       ether_addr_copy(addr->value, msg);
+       addr->media_id = TIPC_MEDIA_TYPE_ETH;
+       addr->broadcast = !memcmp(addr->value, bcast_mac, ETH_ALEN);
        return 0;
 }
 
+/* Convert discovery msg addr format to Ethernet media addr format */
+static int tipc_eth_msg2addr(struct tipc_bearer *b,
+                            struct tipc_media_addr *addr,
+                            char *msg)
+{
+       /* Skip past preamble: */
+       msg += ETH_ADDR_OFFSET;
+       return tipc_eth_raw2addr(b, addr, msg);
+}
+
 /* Ethernet media registration info */
 struct tipc_media eth_media_info = {
        .send_msg       = tipc_l2_send_msg,
@@ -78,6 +91,7 @@ struct tipc_media eth_media_info = {
        .addr2str       = tipc_eth_addr2str,
        .addr2msg       = tipc_eth_addr2msg,
        .msg2addr       = tipc_eth_msg2addr,
+       .raw2addr       = tipc_eth_raw2addr,
        .priority       = TIPC_DEF_LINK_PRI,
        .tolerance      = TIPC_DEF_LINK_TOL,
        .window         = TIPC_DEF_LINK_WIN,
@@ -85,4 +99,3 @@ struct tipc_media eth_media_info = {
        .hwaddr_len     = ETH_ALEN,
        .name           = "eth"
 };
-
diff --git a/net/tipc/handler.c b/net/tipc/handler.c
deleted file mode 100644 (file)
index 1fabf16..0000000
+++ /dev/null
@@ -1,134 +0,0 @@
-/*
- * net/tipc/handler.c: TIPC signal handling
- *
- * Copyright (c) 2000-2006, Ericsson AB
- * Copyright (c) 2005, Wind River Systems
- * All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are met:
- *
- * 1. Redistributions of source code must retain the above copyright
- *    notice, this list of conditions and the following disclaimer.
- * 2. Redistributions in binary form must reproduce the above copyright
- *    notice, this list of conditions and the following disclaimer in the
- *    documentation and/or other materials provided with the distribution.
- * 3. Neither the names of the copyright holders nor the names of its
- *    contributors may be used to endorse or promote products derived from
- *    this software without specific prior written permission.
- *
- * Alternatively, this software may be distributed under the terms of the
- * GNU General Public License ("GPL") version 2 as published by the Free
- * Software Foundation.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
- * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
- * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
- * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
- * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
- * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
- * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
- * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
- * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
- * POSSIBILITY OF SUCH DAMAGE.
- */
-
-#include "core.h"
-
-struct queue_item {
-       struct list_head next_signal;
-       void (*handler) (unsigned long);
-       unsigned long data;
-};
-
-static struct kmem_cache *tipc_queue_item_cache;
-static struct list_head signal_queue_head;
-static DEFINE_SPINLOCK(qitem_lock);
-static int handler_enabled __read_mostly;
-
-static void process_signal_queue(unsigned long dummy);
-
-static DECLARE_TASKLET_DISABLED(tipc_tasklet, process_signal_queue, 0);
-
-
-unsigned int tipc_k_signal(Handler routine, unsigned long argument)
-{
-       struct queue_item *item;
-
-       spin_lock_bh(&qitem_lock);
-       if (!handler_enabled) {
-               spin_unlock_bh(&qitem_lock);
-               return -ENOPROTOOPT;
-       }
-
-       item = kmem_cache_alloc(tipc_queue_item_cache, GFP_ATOMIC);
-       if (!item) {
-               pr_err("Signal queue out of memory\n");
-               spin_unlock_bh(&qitem_lock);
-               return -ENOMEM;
-       }
-       item->handler = routine;
-       item->data = argument;
-       list_add_tail(&item->next_signal, &signal_queue_head);
-       spin_unlock_bh(&qitem_lock);
-       tasklet_schedule(&tipc_tasklet);
-       return 0;
-}
-
-static void process_signal_queue(unsigned long dummy)
-{
-       struct queue_item *__volatile__ item;
-       struct list_head *l, *n;
-
-       spin_lock_bh(&qitem_lock);
-       list_for_each_safe(l, n, &signal_queue_head) {
-               item = list_entry(l, struct queue_item, next_signal);
-               list_del(&item->next_signal);
-               spin_unlock_bh(&qitem_lock);
-               item->handler(item->data);
-               spin_lock_bh(&qitem_lock);
-               kmem_cache_free(tipc_queue_item_cache, item);
-       }
-       spin_unlock_bh(&qitem_lock);
-}
-
-int tipc_handler_start(void)
-{
-       tipc_queue_item_cache =
-               kmem_cache_create("tipc_queue_items", sizeof(struct queue_item),
-                                 0, SLAB_HWCACHE_ALIGN, NULL);
-       if (!tipc_queue_item_cache)
-               return -ENOMEM;
-
-       INIT_LIST_HEAD(&signal_queue_head);
-       tasklet_enable(&tipc_tasklet);
-       handler_enabled = 1;
-       return 0;
-}
-
-void tipc_handler_stop(void)
-{
-       struct list_head *l, *n;
-       struct queue_item *item;
-
-       spin_lock_bh(&qitem_lock);
-       if (!handler_enabled) {
-               spin_unlock_bh(&qitem_lock);
-               return;
-       }
-       handler_enabled = 0;
-       spin_unlock_bh(&qitem_lock);
-
-       tasklet_kill(&tipc_tasklet);
-
-       spin_lock_bh(&qitem_lock);
-       list_for_each_safe(l, n, &signal_queue_head) {
-               item = list_entry(l, struct queue_item, next_signal);
-               list_del(&item->next_signal);
-               kmem_cache_free(tipc_queue_item_cache, item);
-       }
-       spin_unlock_bh(&qitem_lock);
-
-       kmem_cache_destroy(tipc_queue_item_cache);
-}
index 844a77e2582856ae8cff4618c5e1de23f4220e0b..8522eef9c136bc25d39e166b32dfc459881d77c9 100644 (file)
@@ -42,7 +42,7 @@
 #include "core.h"
 #include "bearer.h"
 
-/* convert InfiniBand address to string */
+/* convert InfiniBand address (media address format) to string */
 static int tipc_ib_addr2str(struct tipc_media_addr *a, char *str_buf,
                            int str_size)
 {
@@ -54,23 +54,35 @@ static int tipc_ib_addr2str(struct tipc_media_addr *a, char *str_buf,
        return 0;
 }
 
-/* convert InfiniBand address format to message header format */
-static int tipc_ib_addr2msg(struct tipc_media_addr *a, char *msg_area)
+/* Convert from media address format to discovery message addr format */
+static int tipc_ib_addr2msg(char *msg, struct tipc_media_addr *addr)
 {
-       memset(msg_area, 0, TIPC_MEDIA_ADDR_SIZE);
-       msg_area[TIPC_MEDIA_TYPE_OFFSET] = TIPC_MEDIA_TYPE_IB;
-       memcpy(msg_area, a->value, INFINIBAND_ALEN);
+       memset(msg, 0, TIPC_MEDIA_ADDR_SIZE);
+       memcpy(msg, addr->value, INFINIBAND_ALEN);
        return 0;
 }
 
-/* convert message header address format to InfiniBand format */
-static int tipc_ib_msg2addr(const struct tipc_bearer *tb_ptr,
-                           struct tipc_media_addr *a, char *msg_area)
+/* Convert raw InfiniBand address format to media addr format */
+static int tipc_ib_raw2addr(struct tipc_bearer *b,
+                           struct tipc_media_addr *addr,
+                           char *msg)
 {
-       tipc_l2_media_addr_set(tb_ptr, a, msg_area);
+       memset(addr, 0, sizeof(*addr));
+       memcpy(addr->value, msg, INFINIBAND_ALEN);
+       addr->media_id = TIPC_MEDIA_TYPE_IB;
+       addr->broadcast = !memcmp(msg, b->bcast_addr.value,
+                                 INFINIBAND_ALEN);
        return 0;
 }
 
+/* Convert discovery msg addr format to InfiniBand media addr format */
+static int tipc_ib_msg2addr(struct tipc_bearer *b,
+                           struct tipc_media_addr *addr,
+                           char *msg)
+{
+       return tipc_ib_raw2addr(b, addr, msg);
+}
+
 /* InfiniBand media registration info */
 struct tipc_media ib_media_info = {
        .send_msg       = tipc_l2_send_msg,
@@ -79,6 +91,7 @@ struct tipc_media ib_media_info = {
        .addr2str       = tipc_ib_addr2str,
        .addr2msg       = tipc_ib_addr2msg,
        .msg2addr       = tipc_ib_msg2addr,
+       .raw2addr       = tipc_ib_raw2addr,
        .priority       = TIPC_DEF_LINK_PRI,
        .tolerance      = TIPC_DEF_LINK_TOL,
        .window         = TIPC_DEF_LINK_WIN,
@@ -86,4 +99,3 @@ struct tipc_media ib_media_info = {
        .hwaddr_len     = INFINIBAND_ALEN,
        .name           = "ib"
 };
-
index c5190ab75290d04202b99a3e923a69fe1a9dad38..ad2c57f5868dafe28fbf4204f9fc2189c1156d25 100644 (file)
@@ -37,6 +37,7 @@
 #include "core.h"
 #include "link.h"
 #include "port.h"
+#include "socket.h"
 #include "name_distr.h"
 #include "discover.h"
 #include "config.h"
@@ -101,9 +102,18 @@ static unsigned int align(unsigned int i)
 
 static void link_init_max_pkt(struct tipc_link *l_ptr)
 {
+       struct tipc_bearer *b_ptr;
        u32 max_pkt;
 
-       max_pkt = (l_ptr->b_ptr->mtu & ~3);
+       rcu_read_lock();
+       b_ptr = rcu_dereference_rtnl(bearer_list[l_ptr->bearer_id]);
+       if (!b_ptr) {
+               rcu_read_unlock();
+               return;
+       }
+       max_pkt = (b_ptr->mtu & ~3);
+       rcu_read_unlock();
+
        if (max_pkt > MAX_MSG_SIZE)
                max_pkt = MAX_MSG_SIZE;
 
@@ -248,7 +258,7 @@ struct tipc_link *tipc_link_create(struct tipc_node *n_ptr,
        l_ptr->owner = n_ptr;
        l_ptr->checkpoint = 1;
        l_ptr->peer_session = INVALID_SESSION;
-       l_ptr->b_ptr = b_ptr;
+       l_ptr->bearer_id = b_ptr->identity;
        link_set_supervision_props(l_ptr, b_ptr->tolerance);
        l_ptr->state = RESET_UNKNOWN;
 
@@ -263,6 +273,7 @@ struct tipc_link *tipc_link_create(struct tipc_node *n_ptr,
        l_ptr->priority = b_ptr->priority;
        tipc_link_set_queue_limits(l_ptr, b_ptr->window);
 
+       l_ptr->net_plane = b_ptr->net_plane;
        link_init_max_pkt(l_ptr);
 
        l_ptr->next_out_no = 1;
@@ -287,14 +298,14 @@ void tipc_link_delete_list(unsigned int bearer_id, bool shutting_down)
 
        rcu_read_lock();
        list_for_each_entry_rcu(n_ptr, &tipc_node_list, list) {
-               spin_lock_bh(&n_ptr->lock);
+               tipc_node_lock(n_ptr);
                l_ptr = n_ptr->links[bearer_id];
                if (l_ptr) {
                        tipc_link_reset(l_ptr);
                        if (shutting_down || !tipc_node_is_up(n_ptr)) {
                                tipc_node_detach_link(l_ptr->owner, l_ptr);
                                tipc_link_reset_fragments(l_ptr);
-                               spin_unlock_bh(&n_ptr->lock);
+                               tipc_node_unlock(n_ptr);
 
                                /* Nobody else can access this link now: */
                                del_timer_sync(&l_ptr->timer);
@@ -302,12 +313,12 @@ void tipc_link_delete_list(unsigned int bearer_id, bool shutting_down)
                        } else {
                                /* Detach/delete when failover is finished: */
                                l_ptr->flags |= LINK_STOPPED;
-                               spin_unlock_bh(&n_ptr->lock);
+                               tipc_node_unlock(n_ptr);
                                del_timer_sync(&l_ptr->timer);
                        }
                        continue;
                }
-               spin_unlock_bh(&n_ptr->lock);
+               tipc_node_unlock(n_ptr);
        }
        rcu_read_unlock();
 }
@@ -388,9 +399,8 @@ static void link_release_outqueue(struct tipc_link *l_ptr)
  */
 void tipc_link_reset_fragments(struct tipc_link *l_ptr)
 {
-       kfree_skb(l_ptr->reasm_head);
-       l_ptr->reasm_head = NULL;
-       l_ptr->reasm_tail = NULL;
+       kfree_skb(l_ptr->reasm_buf);
+       l_ptr->reasm_buf = NULL;
 }
 
 /**
@@ -426,7 +436,7 @@ void tipc_link_reset(struct tipc_link *l_ptr)
                return;
 
        tipc_node_link_down(l_ptr->owner, l_ptr);
-       tipc_bearer_remove_dest(l_ptr->b_ptr, l_ptr->addr);
+       tipc_bearer_remove_dest(l_ptr->bearer_id, l_ptr->addr);
 
        if (was_active_link && tipc_node_active_links(l_ptr->owner)) {
                l_ptr->reset_checkpoint = checkpoint;
@@ -464,11 +474,11 @@ void tipc_link_reset_list(unsigned int bearer_id)
 
        rcu_read_lock();
        list_for_each_entry_rcu(n_ptr, &tipc_node_list, list) {
-               spin_lock_bh(&n_ptr->lock);
+               tipc_node_lock(n_ptr);
                l_ptr = n_ptr->links[bearer_id];
                if (l_ptr)
                        tipc_link_reset(l_ptr);
-               spin_unlock_bh(&n_ptr->lock);
+               tipc_node_unlock(n_ptr);
        }
        rcu_read_unlock();
 }
@@ -477,7 +487,7 @@ static void link_activate(struct tipc_link *l_ptr)
 {
        l_ptr->next_in_no = l_ptr->stats.recv_info = 1;
        tipc_node_link_up(l_ptr->owner, l_ptr);
-       tipc_bearer_add_dest(l_ptr->b_ptr, l_ptr->addr);
+       tipc_bearer_add_dest(l_ptr->bearer_id, l_ptr->addr);
 }
 
 /**
@@ -777,7 +787,7 @@ int __tipc_link_xmit(struct tipc_link *l_ptr, struct sk_buff *buf)
        if (likely(!link_congested(l_ptr))) {
                link_add_to_outqueue(l_ptr, buf, msg);
 
-               tipc_bearer_send(l_ptr->b_ptr, buf, &l_ptr->media_addr);
+               tipc_bearer_send(l_ptr->bearer_id, buf, &l_ptr->media_addr);
                l_ptr->unacked_window = 0;
                return dsz;
        }
@@ -825,7 +835,6 @@ int tipc_link_xmit(struct sk_buff *buf, u32 dest, u32 selector)
        struct tipc_node *n_ptr;
        int res = -ELINKCONG;
 
-       read_lock_bh(&tipc_net_lock);
        n_ptr = tipc_node_find(dest);
        if (n_ptr) {
                tipc_node_lock(n_ptr);
@@ -838,7 +847,6 @@ int tipc_link_xmit(struct sk_buff *buf, u32 dest, u32 selector)
        } else {
                kfree_skb(buf);
        }
-       read_unlock_bh(&tipc_net_lock);
        return res;
 }
 
@@ -902,7 +910,6 @@ void tipc_link_names_xmit(struct list_head *message_list, u32 dest)
        if (list_empty(message_list))
                return;
 
-       read_lock_bh(&tipc_net_lock);
        n_ptr = tipc_node_find(dest);
        if (n_ptr) {
                tipc_node_lock(n_ptr);
@@ -917,7 +924,6 @@ void tipc_link_names_xmit(struct list_head *message_list, u32 dest)
                }
                tipc_node_unlock(n_ptr);
        }
-       read_unlock_bh(&tipc_net_lock);
 
        /* discard the messages if they couldn't be sent */
        list_for_each_safe(buf, temp_buf, ((struct sk_buff *)message_list)) {
@@ -941,7 +947,7 @@ static int tipc_link_xmit_fast(struct tipc_link *l_ptr, struct sk_buff *buf,
        if (likely(!link_congested(l_ptr))) {
                if (likely(msg_size(msg) <= l_ptr->max_pkt)) {
                        link_add_to_outqueue(l_ptr, buf, msg);
-                       tipc_bearer_send(l_ptr->b_ptr, buf,
+                       tipc_bearer_send(l_ptr->bearer_id, buf,
                                         &l_ptr->media_addr);
                        l_ptr->unacked_window = 0;
                        return res;
@@ -979,7 +985,6 @@ again:
        if (unlikely(res < 0))
                return res;
 
-       read_lock_bh(&tipc_net_lock);
        node = tipc_node_find(destaddr);
        if (likely(node)) {
                tipc_node_lock(node);
@@ -990,7 +995,6 @@ again:
                                                          &sender->max_pkt);
 exit:
                                tipc_node_unlock(node);
-                               read_unlock_bh(&tipc_net_lock);
                                return res;
                        }
 
@@ -1007,7 +1011,6 @@ exit:
                         */
                        sender->max_pkt = l_ptr->max_pkt;
                        tipc_node_unlock(node);
-                       read_unlock_bh(&tipc_net_lock);
 
 
                        if ((msg_hdr_sz(hdr) + res) <= sender->max_pkt)
@@ -1018,7 +1021,6 @@ exit:
                }
                tipc_node_unlock(node);
        }
-       read_unlock_bh(&tipc_net_lock);
 
        /* Couldn't find a link to the destination node */
        kfree_skb(buf);
@@ -1204,7 +1206,7 @@ static u32 tipc_link_push_packet(struct tipc_link *l_ptr)
        if (r_q_size && buf) {
                msg_set_ack(buf_msg(buf), mod(l_ptr->next_in_no - 1));
                msg_set_bcast_ack(buf_msg(buf), l_ptr->owner->bclink.last_in);
-               tipc_bearer_send(l_ptr->b_ptr, buf, &l_ptr->media_addr);
+               tipc_bearer_send(l_ptr->bearer_id, buf, &l_ptr->media_addr);
                l_ptr->retransm_queue_head = mod(++r_q_head);
                l_ptr->retransm_queue_size = --r_q_size;
                l_ptr->stats.retransmitted++;
@@ -1216,7 +1218,7 @@ static u32 tipc_link_push_packet(struct tipc_link *l_ptr)
        if (buf) {
                msg_set_ack(buf_msg(buf), mod(l_ptr->next_in_no - 1));
                msg_set_bcast_ack(buf_msg(buf), l_ptr->owner->bclink.last_in);
-               tipc_bearer_send(l_ptr->b_ptr, buf, &l_ptr->media_addr);
+               tipc_bearer_send(l_ptr->bearer_id, buf, &l_ptr->media_addr);
                l_ptr->unacked_window = 0;
                kfree_skb(buf);
                l_ptr->proto_msg_queue = NULL;
@@ -1233,7 +1235,8 @@ static u32 tipc_link_push_packet(struct tipc_link *l_ptr)
                if (mod(next - first) < l_ptr->queue_limit[0]) {
                        msg_set_ack(msg, mod(l_ptr->next_in_no - 1));
                        msg_set_bcast_ack(msg, l_ptr->owner->bclink.last_in);
-                       tipc_bearer_send(l_ptr->b_ptr, buf, &l_ptr->media_addr);
+                       tipc_bearer_send(l_ptr->bearer_id, buf,
+                                        &l_ptr->media_addr);
                        if (msg_user(msg) == MSG_BUNDLER)
                                msg_set_type(msg, CLOSED_MSG);
                        l_ptr->next_out = buf->next;
@@ -1256,33 +1259,24 @@ void tipc_link_push_queue(struct tipc_link *l_ptr)
        } while (!res);
 }
 
-static void link_reset_all(unsigned long addr)
+void tipc_link_reset_all(struct tipc_node *node)
 {
-       struct tipc_node *n_ptr;
        char addr_string[16];
        u32 i;
 
-       read_lock_bh(&tipc_net_lock);
-       n_ptr = tipc_node_find((u32)addr);
-       if (!n_ptr) {
-               read_unlock_bh(&tipc_net_lock);
-               return; /* node no longer exists */
-       }
-
-       tipc_node_lock(n_ptr);
+       tipc_node_lock(node);
 
        pr_warn("Resetting all links to %s\n",
-               tipc_addr_string_fill(addr_string, n_ptr->addr));
+               tipc_addr_string_fill(addr_string, node->addr));
 
        for (i = 0; i < MAX_BEARERS; i++) {
-               if (n_ptr->links[i]) {
-                       link_print(n_ptr->links[i], "Resetting link\n");
-                       tipc_link_reset(n_ptr->links[i]);
+               if (node->links[i]) {
+                       link_print(node->links[i], "Resetting link\n");
+                       tipc_link_reset(node->links[i]);
                }
        }
 
-       tipc_node_unlock(n_ptr);
-       read_unlock_bh(&tipc_net_lock);
+       tipc_node_unlock(node);
 }
 
 static void link_retransmit_failure(struct tipc_link *l_ptr,
@@ -1319,10 +1313,9 @@ static void link_retransmit_failure(struct tipc_link *l_ptr,
                        n_ptr->bclink.oos_state,
                        n_ptr->bclink.last_sent);
 
-               tipc_k_signal((Handler)link_reset_all, (unsigned long)n_ptr->addr);
-
                tipc_node_unlock(n_ptr);
 
+               tipc_bclink_set_flags(TIPC_BCLINK_RESET);
                l_ptr->stale_count = 0;
        }
 }
@@ -1352,7 +1345,7 @@ void tipc_link_retransmit(struct tipc_link *l_ptr, struct sk_buff *buf,
                msg = buf_msg(buf);
                msg_set_ack(msg, mod(l_ptr->next_in_no - 1));
                msg_set_bcast_ack(msg, l_ptr->owner->bclink.last_in);
-               tipc_bearer_send(l_ptr->b_ptr, buf, &l_ptr->media_addr);
+               tipc_bearer_send(l_ptr->bearer_id, buf, &l_ptr->media_addr);
                buf = buf->next;
                retransmits--;
                l_ptr->stats.retransmitted++;
@@ -1440,14 +1433,13 @@ static int link_recv_buf_validate(struct sk_buff *buf)
 /**
  * tipc_rcv - process TIPC packets/messages arriving from off-node
  * @head: pointer to message buffer chain
- * @tb_ptr: pointer to bearer message arrived on
+ * @b_ptr: pointer to bearer message arrived on
  *
  * Invoked with no locks held.  Bearer pointer must point to a valid bearer
  * structure (i.e. cannot be NULL), but bearer can be inactive.
  */
 void tipc_rcv(struct sk_buff *head, struct tipc_bearer *b_ptr)
 {
-       read_lock_bh(&tipc_net_lock);
        while (head) {
                struct tipc_node *n_ptr;
                struct tipc_link *l_ptr;
@@ -1497,14 +1489,14 @@ void tipc_rcv(struct sk_buff *head, struct tipc_bearer *b_ptr)
                        goto unlock_discard;
 
                /* Verify that communication with node is currently allowed */
-               if ((n_ptr->block_setup & WAIT_PEER_DOWN) &&
-                       msg_user(msg) == LINK_PROTOCOL &&
-                       (msg_type(msg) == RESET_MSG ||
-                        msg_type(msg) == ACTIVATE_MSG) &&
-                       !msg_redundant_link(msg))
-                       n_ptr->block_setup &= ~WAIT_PEER_DOWN;
-
-               if (n_ptr->block_setup)
+               if ((n_ptr->action_flags & TIPC_WAIT_PEER_LINKS_DOWN) &&
+                   msg_user(msg) == LINK_PROTOCOL &&
+                   (msg_type(msg) == RESET_MSG ||
+                   msg_type(msg) == ACTIVATE_MSG) &&
+                   !msg_redundant_link(msg))
+                       n_ptr->action_flags &= ~TIPC_WAIT_PEER_LINKS_DOWN;
+
+               if (tipc_node_blocked(n_ptr))
                        goto unlock_discard;
 
                /* Validate message sequence number info */
@@ -1581,17 +1573,12 @@ void tipc_rcv(struct sk_buff *head, struct tipc_bearer *b_ptr)
                        }
                        msg = buf_msg(buf);
                } else if (msg_user(msg) == MSG_FRAGMENTER) {
-                       int rc;
-
                        l_ptr->stats.recv_fragments++;
-                       rc = tipc_link_frag_rcv(&l_ptr->reasm_head,
-                                               &l_ptr->reasm_tail,
-                                               &buf);
-                       if (rc == LINK_REASM_COMPLETE) {
+                       if (tipc_buf_append(&l_ptr->reasm_buf, &buf)) {
                                l_ptr->stats.recv_fragmented++;
                                msg = buf_msg(buf);
                        } else {
-                               if (rc == LINK_REASM_ERROR)
+                               if (!l_ptr->reasm_buf)
                                        tipc_link_reset(l_ptr);
                                tipc_node_unlock(n_ptr);
                                continue;
@@ -1604,7 +1591,7 @@ void tipc_rcv(struct sk_buff *head, struct tipc_bearer *b_ptr)
                case TIPC_HIGH_IMPORTANCE:
                case TIPC_CRITICAL_IMPORTANCE:
                        tipc_node_unlock(n_ptr);
-                       tipc_port_rcv(buf);
+                       tipc_sk_rcv(buf);
                        continue;
                case MSG_BUNDLER:
                        l_ptr->stats.recv_bundles++;
@@ -1635,7 +1622,6 @@ unlock_discard:
 discard:
                kfree_skb(buf);
        }
-       read_unlock_bh(&tipc_net_lock);
 }
 
 /**
@@ -1747,12 +1733,12 @@ void tipc_link_proto_xmit(struct tipc_link *l_ptr, u32 msg_typ, int probe_msg,
                return;
 
        /* Abort non-RESET send if communication with node is prohibited */
-       if ((l_ptr->owner->block_setup) && (msg_typ != RESET_MSG))
+       if ((tipc_node_blocked(l_ptr->owner)) && (msg_typ != RESET_MSG))
                return;
 
        /* Create protocol message with "out-of-sequence" sequence number */
        msg_set_type(msg, msg_typ);
-       msg_set_net_plane(msg, l_ptr->b_ptr->net_plane);
+       msg_set_net_plane(msg, l_ptr->net_plane);
        msg_set_bcast_ack(msg, l_ptr->owner->bclink.last_in);
        msg_set_last_bcast(msg, tipc_bclink_get_last_sent());
 
@@ -1818,7 +1804,7 @@ void tipc_link_proto_xmit(struct tipc_link *l_ptr, u32 msg_typ, int probe_msg,
        skb_copy_to_linear_data(buf, msg, sizeof(l_ptr->proto_msg));
        buf->priority = TC_PRIO_CONTROL;
 
-       tipc_bearer_send(l_ptr->b_ptr, buf, &l_ptr->media_addr);
+       tipc_bearer_send(l_ptr->bearer_id, buf, &l_ptr->media_addr);
        l_ptr->unacked_window = 0;
        kfree_skb(buf);
 }
@@ -1840,12 +1826,9 @@ static void tipc_link_proto_rcv(struct tipc_link *l_ptr, struct sk_buff *buf)
        if (l_ptr->exp_msg_count)
                goto exit;
 
-       /* record unnumbered packet arrival (force mismatch on next timeout) */
-       l_ptr->checkpoint--;
-
-       if (l_ptr->b_ptr->net_plane != msg_net_plane(msg))
+       if (l_ptr->net_plane != msg_net_plane(msg))
                if (tipc_own_addr > msg_prevnode(msg))
-                       l_ptr->b_ptr->net_plane = msg_net_plane(msg);
+                       l_ptr->net_plane = msg_net_plane(msg);
 
        switch (msg_type(msg)) {
 
@@ -1862,7 +1845,7 @@ static void tipc_link_proto_rcv(struct tipc_link *l_ptr, struct sk_buff *buf)
                         * peer has lost contact -- don't allow peer's links
                         * to reactivate before we recognize loss & clean up
                         */
-                       l_ptr->owner->block_setup = WAIT_NODE_DOWN;
+                       l_ptr->owner->action_flags |= TIPC_WAIT_OWN_LINKS_DOWN;
                }
 
                link_state_event(l_ptr, RESET_MSG);
@@ -1918,6 +1901,10 @@ static void tipc_link_proto_rcv(struct tipc_link *l_ptr, struct sk_buff *buf)
                        tipc_link_reset(l_ptr); /* Enforce change to take effect */
                        break;
                }
+
+               /* Record reception; force mismatch at next timeout: */
+               l_ptr->checkpoint--;
+
                link_state_event(l_ptr, TRAFFIC_MSG_EVT);
                l_ptr->stats.recv_states++;
                if (link_reset_unknown(l_ptr))
@@ -2177,9 +2164,7 @@ static struct sk_buff *tipc_link_failover_rcv(struct tipc_link *l_ptr,
                }
                if (msg_user(msg) == MSG_FRAGMENTER) {
                        l_ptr->stats.recv_fragments++;
-                       tipc_link_frag_rcv(&l_ptr->reasm_head,
-                                          &l_ptr->reasm_tail,
-                                          &buf);
+                       tipc_buf_append(&l_ptr->reasm_buf, &buf);
                }
        }
 exit:
@@ -2317,53 +2302,6 @@ static int tipc_link_frag_xmit(struct tipc_link *l_ptr, struct sk_buff *buf)
        return dsz;
 }
 
-/* tipc_link_frag_rcv(): Called with node lock on. Returns
- * the reassembled buffer if message is complete.
- */
-int tipc_link_frag_rcv(struct sk_buff **head, struct sk_buff **tail,
-                      struct sk_buff **fbuf)
-{
-       struct sk_buff *frag = *fbuf;
-       struct tipc_msg *msg = buf_msg(frag);
-       u32 fragid = msg_type(msg);
-       bool headstolen;
-       int delta;
-
-       skb_pull(frag, msg_hdr_sz(msg));
-       if (fragid == FIRST_FRAGMENT) {
-               if (*head || skb_unclone(frag, GFP_ATOMIC))
-                       goto out_free;
-               *head = frag;
-               skb_frag_list_init(*head);
-               *fbuf = NULL;
-               return 0;
-       } else if (*head &&
-                  skb_try_coalesce(*head, frag, &headstolen, &delta)) {
-               kfree_skb_partial(frag, headstolen);
-       } else {
-               if (!*head)
-                       goto out_free;
-               if (!skb_has_frag_list(*head))
-                       skb_shinfo(*head)->frag_list = frag;
-               else
-                       (*tail)->next = frag;
-               *tail = frag;
-               (*head)->truesize += frag->truesize;
-       }
-       if (fragid == LAST_FRAGMENT) {
-               *fbuf = *head;
-               *tail = *head = NULL;
-               return LINK_REASM_COMPLETE;
-       }
-       *fbuf = NULL;
-       return 0;
-out_free:
-       pr_warn_ratelimited("Link unable to reassemble fragmented message\n");
-       kfree_skb(*fbuf);
-       *fbuf = NULL;
-       return LINK_REASM_ERROR;
-}
-
 static void link_set_supervision_props(struct tipc_link *l_ptr, u32 tolerance)
 {
        if ((tolerance < TIPC_MIN_LINK_TOL) || (tolerance > TIPC_MAX_LINK_TOL))
@@ -2397,8 +2335,6 @@ void tipc_link_set_queue_limits(struct tipc_link *l_ptr, u32 window)
 /* tipc_link_find_owner - locate owner node of link by link's name
  * @name: pointer to link name string
  * @bearer_id: pointer to index in 'node->links' array where the link was found.
- * Caller must hold 'tipc_net_lock' to ensure node and bearer are not deleted;
- * this also prevents link deletion.
  *
  * Returns pointer to node owning the link, or 0 if no matching link is found.
  */
@@ -2460,7 +2396,7 @@ static int link_value_is_valid(u16 cmd, u32 new_value)
  * @new_value: new value of link, bearer, or media setting
  * @cmd: which link, bearer, or media attribute to set (TIPC_CMD_SET_LINK_*)
  *
- * Caller must hold 'tipc_net_lock' to ensure link/bearer/media is not deleted.
+ * Caller must hold RTNL lock to ensure link/bearer/media is not deleted.
  *
  * Returns 0 if value updated and negative value on error.
  */
@@ -2566,9 +2502,7 @@ struct sk_buff *tipc_link_cmd_config(const void *req_tlv_area, int req_tlv_space
                                                   " (cannot change setting on broadcast link)");
        }
 
-       read_lock_bh(&tipc_net_lock);
        res = link_cmd_set_value(args->name, new_value, cmd);
-       read_unlock_bh(&tipc_net_lock);
        if (res)
                return tipc_cfg_reply_error_string("cannot change link setting");
 
@@ -2602,22 +2536,18 @@ struct sk_buff *tipc_link_cmd_reset_stats(const void *req_tlv_area, int req_tlv_
                        return tipc_cfg_reply_error_string("link not found");
                return tipc_cfg_reply_none();
        }
-       read_lock_bh(&tipc_net_lock);
        node = tipc_link_find_owner(link_name, &bearer_id);
-       if (!node) {
-               read_unlock_bh(&tipc_net_lock);
+       if (!node)
                return tipc_cfg_reply_error_string("link not found");
-       }
+
        tipc_node_lock(node);
        l_ptr = node->links[bearer_id];
        if (!l_ptr) {
                tipc_node_unlock(node);
-               read_unlock_bh(&tipc_net_lock);
                return tipc_cfg_reply_error_string("link not found");
        }
        link_reset_statistics(l_ptr);
        tipc_node_unlock(node);
-       read_unlock_bh(&tipc_net_lock);
        return tipc_cfg_reply_none();
 }
 
@@ -2650,18 +2580,15 @@ static int tipc_link_stats(const char *name, char *buf, const u32 buf_size)
        if (!strcmp(name, tipc_bclink_name))
                return tipc_bclink_stats(buf, buf_size);
 
-       read_lock_bh(&tipc_net_lock);
        node = tipc_link_find_owner(name, &bearer_id);
-       if (!node) {
-               read_unlock_bh(&tipc_net_lock);
+       if (!node)
                return 0;
-       }
+
        tipc_node_lock(node);
 
        l = node->links[bearer_id];
        if (!l) {
                tipc_node_unlock(node);
-               read_unlock_bh(&tipc_net_lock);
                return 0;
        }
 
@@ -2727,7 +2654,6 @@ static int tipc_link_stats(const char *name, char *buf, const u32 buf_size)
                             (s->accu_queue_sz / s->queue_sz_counts) : 0);
 
        tipc_node_unlock(node);
-       read_unlock_bh(&tipc_net_lock);
        return ret;
 }
 
@@ -2778,7 +2704,6 @@ u32 tipc_link_get_max_pkt(u32 dest, u32 selector)
        if (dest == tipc_own_addr)
                return MAX_MSG_SIZE;
 
-       read_lock_bh(&tipc_net_lock);
        n_ptr = tipc_node_find(dest);
        if (n_ptr) {
                tipc_node_lock(n_ptr);
@@ -2787,13 +2712,18 @@ u32 tipc_link_get_max_pkt(u32 dest, u32 selector)
                        res = l_ptr->max_pkt;
                tipc_node_unlock(n_ptr);
        }
-       read_unlock_bh(&tipc_net_lock);
        return res;
 }
 
 static void link_print(struct tipc_link *l_ptr, const char *str)
 {
-       pr_info("%s Link %x<%s>:", str, l_ptr->addr, l_ptr->b_ptr->name);
+       struct tipc_bearer *b_ptr;
+
+       rcu_read_lock();
+       b_ptr = rcu_dereference_rtnl(bearer_list[l_ptr->bearer_id]);
+       if (b_ptr)
+               pr_info("%s Link %x<%s>:", str, l_ptr->addr, b_ptr->name);
+       rcu_read_unlock();
 
        if (link_working_unknown(l_ptr))
                pr_cont(":WU\n");
index 8c0b49b5b2ee6b0751f248cf740254a33e424c6a..200d518b218ede4e0ad7c210c863c546f2362947 100644 (file)
 #include "msg.h"
 #include "node.h"
 
-/* Link reassembly status codes
- */
-#define LINK_REASM_ERROR       -1
-#define LINK_REASM_COMPLETE    1
-
 /* Out-of-range value for link sequence numbers
  */
 #define INVALID_LINK_SEQ 0x10000
@@ -107,7 +102,7 @@ struct tipc_stats {
  * @checkpoint: reference point for triggering link continuity checking
  * @peer_session: link session # being used by peer end of link
  * @peer_bearer_id: bearer id used by link's peer endpoint
- * @b_ptr: pointer to bearer used by link
+ * @bearer_id: local bearer id used by link
  * @tolerance: minimum link continuity loss needed to reset link [in ms]
  * @continuity_interval: link continuity testing interval [in ms]
  * @abort_limit: # of unacknowledged continuity probes needed to reset link
@@ -116,6 +111,7 @@ struct tipc_stats {
  * @proto_msg: template for control messages generated by link
  * @pmsg: convenience pointer to "proto_msg" field
  * @priority: current link priority
+ * @net_plane: current link network plane ('A' through 'H')
  * @queue_limit: outbound message queue congestion thresholds (indexed by user)
  * @exp_msg_count: # of tunnelled messages expected during link changeover
  * @reset_checkpoint: seq # of last acknowledged message at time of link reset
@@ -139,8 +135,7 @@ struct tipc_stats {
  * @next_out: ptr to first unsent outbound message in queue
  * @waiting_ports: linked list of ports waiting for link congestion to abate
  * @long_msg_seq_no: next identifier to use for outbound fragmented messages
- * @reasm_head: list head of partially reassembled inbound message fragments
- * @reasm_tail: last fragment received
+ * @reasm_buf: head of partially reassembled inbound message fragments
  * @stats: collects statistics regarding link activity
  */
 struct tipc_link {
@@ -155,7 +150,7 @@ struct tipc_link {
        u32 checkpoint;
        u32 peer_session;
        u32 peer_bearer_id;
-       struct tipc_bearer *b_ptr;
+       u32 bearer_id;
        u32 tolerance;
        u32 continuity_interval;
        u32 abort_limit;
@@ -167,6 +162,7 @@ struct tipc_link {
        } proto_msg;
        struct tipc_msg *pmsg;
        u32 priority;
+       char net_plane;
        u32 queue_limit[15];    /* queue_limit[0]==window limit */
 
        /* Changeover */
@@ -202,8 +198,7 @@ struct tipc_link {
 
        /* Fragmentation/reassembly */
        u32 long_msg_seq_no;
-       struct sk_buff *reasm_head;
-       struct sk_buff *reasm_tail;
+       struct sk_buff *reasm_buf;
 
        /* Statistics */
        struct tipc_stats stats;
@@ -228,6 +223,7 @@ struct sk_buff *tipc_link_cmd_show_stats(const void *req_tlv_area,
                                         int req_tlv_space);
 struct sk_buff *tipc_link_cmd_reset_stats(const void *req_tlv_area,
                                          int req_tlv_space);
+void tipc_link_reset_all(struct tipc_node *node);
 void tipc_link_reset(struct tipc_link *l_ptr);
 void tipc_link_reset_list(unsigned int bearer_id);
 int tipc_link_xmit(struct sk_buff *buf, u32 dest, u32 selector);
@@ -239,9 +235,6 @@ int tipc_link_iovec_xmit_fast(struct tipc_port *sender,
                              struct iovec const *msg_sect,
                              unsigned int len, u32 destnode);
 void tipc_link_bundle_rcv(struct sk_buff *buf);
-int tipc_link_frag_rcv(struct sk_buff **reasm_head,
-                      struct sk_buff **reasm_tail,
-                      struct sk_buff **fbuf);
 void tipc_link_proto_xmit(struct tipc_link *l_ptr, u32 msg_typ, int prob,
                          u32 gap, u32 tolerance, u32 priority, u32 acked_mtu);
 void tipc_link_push_queue(struct tipc_link *l_ptr);
index e525f8ce1dee09ce0d9baf214bd3dd8e13daa9d8..8be6e94a1ca9790dbbde757b6bd70fe9c5abb428 100644 (file)
@@ -1,7 +1,7 @@
 /*
  * net/tipc/msg.c: TIPC message header routines
  *
- * Copyright (c) 2000-2006, Ericsson AB
+ * Copyright (c) 2000-2006, 2014, Ericsson AB
  * Copyright (c) 2005, 2010-2011, Wind River Systems
  * All rights reserved.
  *
@@ -99,3 +99,56 @@ int tipc_msg_build(struct tipc_msg *hdr, struct iovec const *msg_sect,
        }
        return dsz;
 }
+
+/* tipc_buf_append(): Append a buffer to the fragment list of another buffer
+ * Let first buffer become head buffer
+ * Returns 1 and sets *buf to headbuf if chain is complete, otherwise 0
+ * Leaves headbuf pointer at NULL on failure
+ */
+int tipc_buf_append(struct sk_buff **headbuf, struct sk_buff **buf)
+{
+       struct sk_buff *head = *headbuf;
+       struct sk_buff *frag = *buf;
+       struct sk_buff *tail;
+       struct tipc_msg *msg = buf_msg(frag);
+       u32 fragid = msg_type(msg);
+       bool headstolen;
+       int delta;
+
+       skb_pull(frag, msg_hdr_sz(msg));
+
+       if (fragid == FIRST_FRAGMENT) {
+               if (head || skb_unclone(frag, GFP_ATOMIC))
+                       goto out_free;
+               head = *headbuf = frag;
+               skb_frag_list_init(head);
+               return 0;
+       }
+       if (!head)
+               goto out_free;
+       tail = TIPC_SKB_CB(head)->tail;
+       if (skb_try_coalesce(head, frag, &headstolen, &delta)) {
+               kfree_skb_partial(frag, headstolen);
+       } else {
+               if (!skb_has_frag_list(head))
+                       skb_shinfo(head)->frag_list = frag;
+               else
+                       tail->next = frag;
+               head->truesize += frag->truesize;
+               head->data_len += frag->len;
+               head->len += frag->len;
+               TIPC_SKB_CB(head)->tail = frag;
+       }
+       if (fragid == LAST_FRAGMENT) {
+               *buf = head;
+               TIPC_SKB_CB(head)->tail = NULL;
+               *headbuf = NULL;
+               return 1;
+       }
+       *buf = NULL;
+       return 0;
+out_free:
+       pr_warn_ratelimited("Unable to build fragment list\n");
+       kfree_skb(*buf);
+       return 0;
+}
index 76d1269b944361076855876219d6e59b16c3b23e..503511903d1d25c9c4106f669da0a7def7c9dd46 100644 (file)
@@ -1,7 +1,7 @@
 /*
  * net/tipc/msg.h: Include file for TIPC message header routines
  *
- * Copyright (c) 2000-2007, Ericsson AB
+ * Copyright (c) 2000-2007, 2014, Ericsson AB
  * Copyright (c) 2005-2008, 2010-2011, Wind River Systems
  * All rights reserved.
  *
@@ -711,4 +711,7 @@ void tipc_msg_init(struct tipc_msg *m, u32 user, u32 type, u32 hsize,
                   u32 destnode);
 int tipc_msg_build(struct tipc_msg *hdr, struct iovec const *msg_sect,
                   unsigned int len, int max_size, struct sk_buff **buf);
+
+int tipc_buf_append(struct sk_buff **headbuf, struct sk_buff **buf);
+
 #endif
index aff8041dc1573e3fea829e286e2675e322350b2e..8ce730984aa1f0d429b2325d58862e1ef7799d4d 100644 (file)
 #include "link.h"
 #include "name_distr.h"
 
-#define ITEM_SIZE sizeof(struct distr_item)
-
-/**
- * struct distr_item - publication info distributed to other nodes
- * @type: name sequence type
- * @lower: name sequence lower bound
- * @upper: name sequence upper bound
- * @ref: publishing port reference
- * @key: publication key
- *
- * ===> All fields are stored in network byte order. <===
- *
- * First 3 fields identify (name or) name sequence being published.
- * Reference field uniquely identifies port that published name sequence.
- * Key field uniquely identifies publication, in the event a port has
- * multiple publications of the same name sequence.
- *
- * Note: There is no field that identifies the publishing node because it is
- * the same for all items contained within a publication message.
- */
-struct distr_item {
-       __be32 type;
-       __be32 lower;
-       __be32 upper;
-       __be32 ref;
-       __be32 key;
-};
-
 /**
  * struct publ_list - list of publications made by this node
  * @list: circular list of publications
@@ -127,7 +99,7 @@ static struct sk_buff *named_prepare_buf(u32 type, u32 size, u32 dest)
        return buf;
 }
 
-static void named_cluster_distribute(struct sk_buff *buf)
+void named_cluster_distribute(struct sk_buff *buf)
 {
        struct sk_buff *buf_copy;
        struct tipc_node *n_ptr;
@@ -135,18 +107,18 @@ static void named_cluster_distribute(struct sk_buff *buf)
 
        rcu_read_lock();
        list_for_each_entry_rcu(n_ptr, &tipc_node_list, list) {
-               spin_lock_bh(&n_ptr->lock);
+               tipc_node_lock(n_ptr);
                l_ptr = n_ptr->active_links[n_ptr->addr & 1];
                if (l_ptr) {
                        buf_copy = skb_copy(buf, GFP_ATOMIC);
                        if (!buf_copy) {
-                               spin_unlock_bh(&n_ptr->lock);
+                               tipc_node_unlock(n_ptr);
                                break;
                        }
                        msg_set_destnode(buf_msg(buf_copy), n_ptr->addr);
                        __tipc_link_xmit(l_ptr, buf_copy);
                }
-               spin_unlock_bh(&n_ptr->lock);
+               tipc_node_unlock(n_ptr);
        }
        rcu_read_unlock();
 
@@ -156,7 +128,7 @@ static void named_cluster_distribute(struct sk_buff *buf)
 /**
  * tipc_named_publish - tell other nodes about a new publication by this node
  */
-void tipc_named_publish(struct publication *publ)
+struct sk_buff *tipc_named_publish(struct publication *publ)
 {
        struct sk_buff *buf;
        struct distr_item *item;
@@ -165,23 +137,23 @@ void tipc_named_publish(struct publication *publ)
        publ_lists[publ->scope]->size++;
 
        if (publ->scope == TIPC_NODE_SCOPE)
-               return;
+               return NULL;
 
        buf = named_prepare_buf(PUBLICATION, ITEM_SIZE, 0);
        if (!buf) {
                pr_warn("Publication distribution failure\n");
-               return;
+               return NULL;
        }
 
        item = (struct distr_item *)msg_data(buf_msg(buf));
        publ_to_item(item, publ);
-       named_cluster_distribute(buf);
+       return buf;
 }
 
 /**
  * tipc_named_withdraw - tell other nodes about a withdrawn publication by this node
  */
-void tipc_named_withdraw(struct publication *publ)
+struct sk_buff *tipc_named_withdraw(struct publication *publ)
 {
        struct sk_buff *buf;
        struct distr_item *item;
@@ -190,17 +162,17 @@ void tipc_named_withdraw(struct publication *publ)
        publ_lists[publ->scope]->size--;
 
        if (publ->scope == TIPC_NODE_SCOPE)
-               return;
+               return NULL;
 
        buf = named_prepare_buf(WITHDRAWAL, ITEM_SIZE, 0);
        if (!buf) {
                pr_warn("Withdrawal distribution failure\n");
-               return;
+               return NULL;
        }
 
        item = (struct distr_item *)msg_data(buf_msg(buf));
        publ_to_item(item, publ);
-       named_cluster_distribute(buf);
+       return buf;
 }
 
 /*
@@ -239,31 +211,9 @@ static void named_distribute(struct list_head *message_list, u32 node,
 /**
  * tipc_named_node_up - tell specified node about all publications by this node
  */
-void tipc_named_node_up(unsigned long nodearg)
+void tipc_named_node_up(u32 max_item_buf, u32 node)
 {
-       struct tipc_node *n_ptr;
-       struct tipc_link *l_ptr;
-       struct list_head message_list;
-       u32 node = (u32)nodearg;
-       u32 max_item_buf = 0;
-
-       /* compute maximum amount of publication data to send per message */
-       read_lock_bh(&tipc_net_lock);
-       n_ptr = tipc_node_find(node);
-       if (n_ptr) {
-               tipc_node_lock(n_ptr);
-               l_ptr = n_ptr->active_links[0];
-               if (l_ptr)
-                       max_item_buf = ((l_ptr->max_pkt - INT_H_SIZE) /
-                               ITEM_SIZE) * ITEM_SIZE;
-               tipc_node_unlock(n_ptr);
-       }
-       read_unlock_bh(&tipc_net_lock);
-       if (!max_item_buf)
-               return;
-
-       /* create list of publication messages, then send them as a unit */
-       INIT_LIST_HEAD(&message_list);
+       LIST_HEAD(message_list);
 
        read_lock_bh(&tipc_nametbl_lock);
        named_distribute(&message_list, node, &publ_cluster, max_item_buf);
index 9b312ccfd43e7da41bcab4ca33d5f0f4d5be86cf..b2eed4ec1526a34efd8b191e4b952bf9718f7a2e 100644 (file)
 
 #include "name_table.h"
 
-void tipc_named_publish(struct publication *publ);
-void tipc_named_withdraw(struct publication *publ);
-void tipc_named_node_up(unsigned long node);
+#define ITEM_SIZE sizeof(struct distr_item)
+
+/**
+ * struct distr_item - publication info distributed to other nodes
+ * @type: name sequence type
+ * @lower: name sequence lower bound
+ * @upper: name sequence upper bound
+ * @ref: publishing port reference
+ * @key: publication key
+ *
+ * ===> All fields are stored in network byte order. <===
+ *
+ * First 3 fields identify (name or) name sequence being published.
+ * Reference field uniquely identifies port that published name sequence.
+ * Key field uniquely identifies publication, in the event a port has
+ * multiple publications of the same name sequence.
+ *
+ * Note: There is no field that identifies the publishing node because it is
+ * the same for all items contained within a publication message.
+ */
+struct distr_item {
+       __be32 type;
+       __be32 lower;
+       __be32 upper;
+       __be32 ref;
+       __be32 key;
+};
+
+struct sk_buff *tipc_named_publish(struct publication *publ);
+struct sk_buff *tipc_named_withdraw(struct publication *publ);
+void named_cluster_distribute(struct sk_buff *buf);
+void tipc_named_node_up(u32 max_item_buf, u32 node);
 void tipc_named_rcv(struct sk_buff *buf);
 void tipc_named_reinit(void);
 
index 042e8e3cabc09f84aa5dce626c57a30faf3ca32d..9d7d37d95187c77d9d7490ce7aec4de147a9f2fd 100644 (file)
@@ -664,6 +664,7 @@ struct publication *tipc_nametbl_publish(u32 type, u32 lower, u32 upper,
                                         u32 scope, u32 port_ref, u32 key)
 {
        struct publication *publ;
+       struct sk_buff *buf = NULL;
 
        if (table.local_publ_count >= TIPC_MAX_PUBLICATIONS) {
                pr_warn("Publication failed, local publication limit reached (%u)\n",
@@ -676,9 +677,12 @@ struct publication *tipc_nametbl_publish(u32 type, u32 lower, u32 upper,
                                   tipc_own_addr, port_ref, key);
        if (likely(publ)) {
                table.local_publ_count++;
-               tipc_named_publish(publ);
+               buf = tipc_named_publish(publ);
        }
        write_unlock_bh(&tipc_nametbl_lock);
+
+       if (buf)
+               named_cluster_distribute(buf);
        return publ;
 }
 
@@ -688,15 +692,19 @@ struct publication *tipc_nametbl_publish(u32 type, u32 lower, u32 upper,
 int tipc_nametbl_withdraw(u32 type, u32 lower, u32 ref, u32 key)
 {
        struct publication *publ;
+       struct sk_buff *buf;
 
        write_lock_bh(&tipc_nametbl_lock);
        publ = tipc_nametbl_remove_publ(type, lower, tipc_own_addr, ref, key);
        if (likely(publ)) {
                table.local_publ_count--;
-               tipc_named_withdraw(publ);
+               buf = tipc_named_withdraw(publ);
                write_unlock_bh(&tipc_nametbl_lock);
                list_del_init(&publ->pport_list);
                kfree(publ);
+
+               if (buf)
+                       named_cluster_distribute(buf);
                return 1;
        }
        write_unlock_bh(&tipc_nametbl_lock);
@@ -961,6 +969,7 @@ static void tipc_purge_publications(struct name_seq *seq)
        list_for_each_entry_safe(publ, safe, &info->zone_list, zone_list) {
                tipc_nametbl_remove_publ(publ->type, publ->lower, publ->node,
                                         publ->ref, publ->key);
+               kfree(publ);
        }
 }
 
@@ -982,7 +991,6 @@ void tipc_nametbl_stop(void)
                hlist_for_each_entry_safe(seq, safe, seq_head, ns_list) {
                        tipc_purge_publications(seq);
                }
-               continue;
        }
        kfree(table.types);
        table.types = NULL;
index 4c564eb69e1ad9bd2695e78eb91464a0112e663c..f64375e7f99fa4081ce2a30629071ca3e546aa05 100644 (file)
 #include "name_distr.h"
 #include "subscr.h"
 #include "port.h"
+#include "socket.h"
 #include "node.h"
 #include "config.h"
 
 /*
  * The TIPC locking policy is designed to ensure a very fine locking
  * granularity, permitting complete parallel access to individual
- * port and node/link instances. The code consists of three major
+ * port and node/link instances. The code consists of four major
  * locking domains, each protected with their own disjunct set of locks.
  *
- * 1: The routing hierarchy.
- *    Comprises the structures 'zone', 'cluster', 'node', 'link'
- *    and 'bearer'. The whole hierarchy is protected by a big
- *    read/write lock, tipc_net_lock, to enssure that nothing is added
- *    or removed while code is accessing any of these structures.
- *    This layer must not be called from the two others while they
- *    hold any of their own locks.
- *    Neither must it itself do any upcalls to the other two before
- *    it has released tipc_net_lock and other protective locks.
+ * 1: The bearer level.
+ *    RTNL lock is used to serialize the process of configuring bearers
+ *    on the update side, and RCU lock is applied on the read side to
+ *    keep bearer instances valid on both the message transmission and
+ *    reception paths.
  *
- *   Within the tipc_net_lock domain there are two sub-domains;'node' and
- *   'bearer', where local write operations are permitted,
- *   provided that those are protected by individual spin_locks
- *   per instance. Code holding tipc_net_lock(read) and a node spin_lock
- *   is permitted to poke around in both the node itself and its
- *   subordinate links. I.e, it can update link counters and queues,
- *   change link state, send protocol messages, and alter the
- *   "active_links" array in the node; but it can _not_ remove a link
- *   or a node from the overall structure.
- *   Correspondingly, individual bearers may change status within a
- *   tipc_net_lock(read), protected by an individual spin_lock ber bearer
- *   instance, but it needs tipc_net_lock(write) to remove/add any bearers.
+ * 2: The node and link level.
+ *    All node instances are saved into two tipc_node_list and node_htable
+ *    lists. The two lists are protected by node_list_lock on write side,
+ *    and they are guarded with RCU lock on read side. Especially node
+ *    instance is destroyed only when TIPC module is removed, and we can
+ *    confirm that there has no any user who is accessing the node at the
+ *    moment. Therefore, Except for iterating the two lists within RCU
+ *    protection, it's no needed to hold RCU that we access node instance
+ *    in other places.
  *
+ *    In addition, all members in node structure including link instances
+ *    are protected by node spin lock.
  *
- *  2: The transport level of the protocol.
- *     This consists of the structures port, (and its user level
- *     representations, such as user_port and tipc_sock), reference and
- *     tipc_user (port.c, reg.c, socket.c).
+ * 3: The transport level of the protocol.
+ *    This consists of the structures port, (and its user level
+ *    representations, such as user_port and tipc_sock), reference and
+ *    tipc_user (port.c, reg.c, socket.c).
  *
- *     This layer has four different locks:
+ *    This layer has four different locks:
  *     - The tipc_port spin_lock. This is protecting each port instance
  *       from parallel data access and removal. Since we can not place
  *       this lock in the port itself, it has been placed in the
@@ -96,7 +92,7 @@
  *       There are two such lists; 'port_list', which is used for management,
  *       and 'wait_list', which is used to queue ports during congestion.
  *
- *  3: The name table (name_table.c, name_distr.c, subscription.c)
+ *  4: The name table (name_table.c, name_distr.c, subscription.c)
  *     - There is one big read/write-lock (tipc_nametbl_lock) protecting the
  *       overall name table structure. Nothing must be added/removed to
  *       this structure without holding write access to it.
  *     - A local spin_lock protecting the queue of subscriber events.
 */
 
-DEFINE_RWLOCK(tipc_net_lock);
-
 static void net_route_named_msg(struct sk_buff *buf)
 {
        struct tipc_msg *msg = buf_msg(buf);
@@ -148,7 +142,7 @@ void tipc_net_route_msg(struct sk_buff *buf)
                        if (msg_mcast(msg))
                                tipc_port_mcast_rcv(buf, NULL);
                        else if (msg_destport(msg))
-                               tipc_port_rcv(buf);
+                               tipc_sk_rcv(buf);
                        else
                                net_route_named_msg(buf);
                        return;
@@ -171,22 +165,25 @@ void tipc_net_route_msg(struct sk_buff *buf)
        tipc_link_xmit(buf, dnode, msg_link_selector(msg));
 }
 
-void tipc_net_start(u32 addr)
+int tipc_net_start(u32 addr)
 {
        char addr_string[16];
+       int res;
 
-       write_lock_bh(&tipc_net_lock);
        tipc_own_addr = addr;
        tipc_named_reinit();
        tipc_port_reinit();
-       tipc_bclink_init();
-       write_unlock_bh(&tipc_net_lock);
+       res = tipc_bclink_init();
+       if (res)
+               return res;
 
        tipc_nametbl_publish(TIPC_CFG_SRV, tipc_own_addr, tipc_own_addr,
                             TIPC_ZONE_SCOPE, 0, tipc_own_addr);
+
        pr_info("Started in network mode\n");
        pr_info("Own node address %s, network identity %u\n",
                tipc_addr_string_fill(addr_string, tipc_own_addr), tipc_net_id);
+       return 0;
 }
 
 void tipc_net_stop(void)
@@ -195,11 +192,11 @@ void tipc_net_stop(void)
                return;
 
        tipc_nametbl_withdraw(TIPC_CFG_SRV, tipc_own_addr, 0, tipc_own_addr);
-       write_lock_bh(&tipc_net_lock);
+       rtnl_lock();
        tipc_bearer_stop();
        tipc_bclink_stop();
        tipc_node_stop();
-       write_unlock_bh(&tipc_net_lock);
+       rtnl_unlock();
 
        pr_info("Left network mode\n");
 }
index 079daadb3f7286471cd5146798f6b06328bf99ad..c6c2b46f7c283095c4e29c7e0b11c5cdea2bcc01 100644 (file)
 #ifndef _TIPC_NET_H
 #define _TIPC_NET_H
 
-extern rwlock_t tipc_net_lock;
-
 void tipc_net_route_msg(struct sk_buff *buf);
 
-void tipc_net_start(u32 addr);
+int tipc_net_start(u32 addr);
 void tipc_net_stop(void);
 
 #endif
index 3aaf73de9e2d017e96b3cc1124d5c9420d827abd..ad844d3653409a6f5ad2ceac53e1b52dc48eacaa 100644 (file)
@@ -47,7 +47,7 @@ static int handle_cmd(struct sk_buff *skb, struct genl_info *info)
        int hdr_space = nlmsg_total_size(GENL_HDRLEN + TIPC_GENL_HDRLEN);
        u16 cmd;
 
-       if ((req_userhdr->cmd & 0xC000) && (!capable(CAP_NET_ADMIN)))
+       if ((req_userhdr->cmd & 0xC000) && (!netlink_capable(skb, CAP_NET_ADMIN)))
                cmd = TIPC_CMD_NOT_NET_ADMIN;
        else
                cmd = req_userhdr->cmd;
index 1d3a4999a70ff96a751f3908d0d8e274af63a962..5b44c3041be431955094de87f1815122deeea369 100644 (file)
@@ -108,7 +108,7 @@ struct tipc_node *tipc_node_create(u32 addr)
                        break;
        }
        list_add_tail_rcu(&n_ptr->list, &temp_node->list);
-       n_ptr->block_setup = WAIT_PEER_DOWN;
+       n_ptr->action_flags = TIPC_WAIT_PEER_LINKS_DOWN;
        n_ptr->signature = INVALID_NODE_SIG;
 
        tipc_num_nodes++;
@@ -144,11 +144,13 @@ void tipc_node_stop(void)
 void tipc_node_link_up(struct tipc_node *n_ptr, struct tipc_link *l_ptr)
 {
        struct tipc_link **active = &n_ptr->active_links[0];
+       u32 addr = n_ptr->addr;
 
        n_ptr->working_links++;
-
+       tipc_nametbl_publish(TIPC_LINK_STATE, addr, addr, TIPC_NODE_SCOPE,
+                            l_ptr->bearer_id, addr);
        pr_info("Established link <%s> on network plane %c\n",
-               l_ptr->name, l_ptr->b_ptr->net_plane);
+               l_ptr->name, l_ptr->net_plane);
 
        if (!active[0]) {
                active[0] = active[1] = l_ptr;
@@ -203,16 +205,18 @@ static void node_select_active_links(struct tipc_node *n_ptr)
 void tipc_node_link_down(struct tipc_node *n_ptr, struct tipc_link *l_ptr)
 {
        struct tipc_link **active;
+       u32 addr = n_ptr->addr;
 
        n_ptr->working_links--;
+       tipc_nametbl_withdraw(TIPC_LINK_STATE, addr, l_ptr->bearer_id, addr);
 
        if (!tipc_link_is_active(l_ptr)) {
                pr_info("Lost standby link <%s> on network plane %c\n",
-                       l_ptr->name, l_ptr->b_ptr->net_plane);
+                       l_ptr->name, l_ptr->net_plane);
                return;
        }
        pr_info("Lost link <%s> on network plane %c\n",
-               l_ptr->name, l_ptr->b_ptr->net_plane);
+               l_ptr->name, l_ptr->net_plane);
 
        active = &n_ptr->active_links[0];
        if (active[0] == l_ptr)
@@ -239,7 +243,7 @@ int tipc_node_is_up(struct tipc_node *n_ptr)
 
 void tipc_node_attach_link(struct tipc_node *n_ptr, struct tipc_link *l_ptr)
 {
-       n_ptr->links[l_ptr->b_ptr->identity] = l_ptr;
+       n_ptr->links[l_ptr->bearer_id] = l_ptr;
        spin_lock_bh(&node_list_lock);
        tipc_num_links++;
        spin_unlock_bh(&node_list_lock);
@@ -263,26 +267,12 @@ void tipc_node_detach_link(struct tipc_node *n_ptr, struct tipc_link *l_ptr)
 
 static void node_established_contact(struct tipc_node *n_ptr)
 {
-       tipc_k_signal((Handler)tipc_named_node_up, n_ptr->addr);
+       n_ptr->action_flags |= TIPC_NOTIFY_NODE_UP;
        n_ptr->bclink.oos_state = 0;
        n_ptr->bclink.acked = tipc_bclink_get_last_sent();
        tipc_bclink_add_node(n_ptr->addr);
 }
 
-static void node_name_purge_complete(unsigned long node_addr)
-{
-       struct tipc_node *n_ptr;
-
-       read_lock_bh(&tipc_net_lock);
-       n_ptr = tipc_node_find(node_addr);
-       if (n_ptr) {
-               tipc_node_lock(n_ptr);
-               n_ptr->block_setup &= ~WAIT_NAMES_GONE;
-               tipc_node_unlock(n_ptr);
-       }
-       read_unlock_bh(&tipc_net_lock);
-}
-
 static void node_lost_contact(struct tipc_node *n_ptr)
 {
        char addr_string[16];
@@ -296,10 +286,9 @@ static void node_lost_contact(struct tipc_node *n_ptr)
                kfree_skb_list(n_ptr->bclink.deferred_head);
                n_ptr->bclink.deferred_size = 0;
 
-               if (n_ptr->bclink.reasm_head) {
-                       kfree_skb(n_ptr->bclink.reasm_head);
-                       n_ptr->bclink.reasm_head = NULL;
-                       n_ptr->bclink.reasm_tail = NULL;
+               if (n_ptr->bclink.reasm_buf) {
+                       kfree_skb(n_ptr->bclink.reasm_buf);
+                       n_ptr->bclink.reasm_buf = NULL;
                }
 
                tipc_bclink_remove_node(n_ptr->addr);
@@ -318,12 +307,13 @@ static void node_lost_contact(struct tipc_node *n_ptr)
                tipc_link_reset_fragments(l_ptr);
        }
 
-       /* Notify subscribers */
-       tipc_nodesub_notify(n_ptr);
+       n_ptr->action_flags &= ~TIPC_WAIT_OWN_LINKS_DOWN;
 
-       /* Prevent re-contact with node until cleanup is done */
-       n_ptr->block_setup = WAIT_PEER_DOWN | WAIT_NAMES_GONE;
-       tipc_k_signal((Handler)node_name_purge_complete, n_ptr->addr);
+       /* Notify subscribers and prevent re-contact with node until
+        * cleanup is done.
+        */
+       n_ptr->action_flags |= TIPC_WAIT_PEER_LINKS_DOWN |
+                              TIPC_NOTIFY_NODE_DOWN;
 }
 
 struct sk_buff *tipc_node_get_nodes(const void *req_tlv_area, int req_tlv_space)
@@ -436,3 +426,63 @@ struct sk_buff *tipc_node_get_links(const void *req_tlv_area, int req_tlv_space)
        rcu_read_unlock();
        return buf;
 }
+
+/**
+ * tipc_node_get_linkname - get the name of a link
+ *
+ * @bearer_id: id of the bearer
+ * @addr: peer node address
+ * @linkname: link name output buffer
+ * @len: size of the output buffer
+ * Returns 0 on success, -EINVAL otherwise
+ */
+int tipc_node_get_linkname(u32 bearer_id, u32 addr, char *linkname, size_t len)
+{
+       struct tipc_link *link;
+       struct tipc_node *node = tipc_node_find(addr);
+
+       if ((bearer_id >= MAX_BEARERS) || !node)
+               return -EINVAL;
+       tipc_node_lock(node);
+       link = node->links[bearer_id];
+       if (link) {
+               strncpy(linkname, link->name, len);
+               tipc_node_unlock(node);
+               return 0;
+       }
+       tipc_node_unlock(node);
+       return -EINVAL;
+}
+
+void tipc_node_unlock(struct tipc_node *node)
+{
+       LIST_HEAD(nsub_list);
+       struct tipc_link *link;
+       int pkt_sz = 0;
+       u32 addr = 0;
+
+       if (likely(!node->action_flags)) {
+               spin_unlock_bh(&node->lock);
+               return;
+       }
+
+       if (node->action_flags & TIPC_NOTIFY_NODE_DOWN) {
+               list_replace_init(&node->nsub, &nsub_list);
+               node->action_flags &= ~TIPC_NOTIFY_NODE_DOWN;
+       }
+       if (node->action_flags & TIPC_NOTIFY_NODE_UP) {
+               link = node->active_links[0];
+               node->action_flags &= ~TIPC_NOTIFY_NODE_UP;
+               if (link) {
+                       pkt_sz = ((link->max_pkt - INT_H_SIZE) / ITEM_SIZE) *
+                                 ITEM_SIZE;
+                       addr = node->addr;
+               }
+       }
+       spin_unlock_bh(&node->lock);
+
+       if (!list_empty(&nsub_list))
+               tipc_nodesub_notify(&nsub_list);
+       if (pkt_sz)
+               tipc_named_node_up(pkt_sz, addr);
+}
index 7cbb8cec1a932f881cd636a71edb0342070530ae..9087063793f26eb352f9b848a14b27545c8b02be 100644 (file)
  */
 #define INVALID_NODE_SIG 0x10000
 
-/* Flags used to block (re)establishment of contact with a neighboring node */
-#define WAIT_PEER_DOWN 0x0001  /* wait to see that peer's links are down */
-#define WAIT_NAMES_GONE        0x0002  /* wait for peer's publications to be purged */
-#define WAIT_NODE_DOWN 0x0004  /* wait until peer node is declared down */
+/* Flags used to take different actions according to flag type
+ * TIPC_WAIT_PEER_LINKS_DOWN: wait to see that peer's links are down
+ * TIPC_WAIT_OWN_LINKS_DOWN: wait until peer node is declared down
+ * TIPC_NOTIFY_NODE_DOWN: notify node is down
+ * TIPC_NOTIFY_NODE_UP: notify node is up
+ */
+enum {
+       TIPC_WAIT_PEER_LINKS_DOWN       = (1 << 1),
+       TIPC_WAIT_OWN_LINKS_DOWN        = (1 << 2),
+       TIPC_NOTIFY_NODE_DOWN           = (1 << 3),
+       TIPC_NOTIFY_NODE_UP             = (1 << 4)
+};
+
+/**
+ * struct tipc_node_bclink - TIPC node bclink structure
+ * @acked: sequence # of last outbound b'cast message acknowledged by node
+ * @last_in: sequence # of last in-sequence b'cast message received from node
+ * @last_sent: sequence # of last b'cast message sent by node
+ * @oos_state: state tracker for handling OOS b'cast messages
+ * @deferred_size: number of OOS b'cast messages in deferred queue
+ * @deferred_head: oldest OOS b'cast message received from node
+ * @deferred_tail: newest OOS b'cast message received from node
+ * @reasm_buf: broadcast reassembly queue head from node
+ * @recv_permitted: true if node is allowed to receive b'cast messages
+ */
+struct tipc_node_bclink {
+       u32 acked;
+       u32 last_in;
+       u32 last_sent;
+       u32 oos_state;
+       u32 deferred_size;
+       struct sk_buff *deferred_head;
+       struct sk_buff *deferred_tail;
+       struct sk_buff *reasm_buf;
+       bool recv_permitted;
+};
 
 /**
  * struct tipc_node - TIPC node structure
  * @addr: network address of node
  * @lock: spinlock governing access to structure
  * @hash: links to adjacent nodes in unsorted hash chain
- * @list: links to adjacent nodes in sorted list of cluster's nodes
- * @nsub: list of "node down" subscriptions monitoring node
  * @active_links: pointers to active links to node
  * @links: pointers to all links to node
+ * @action_flags: bit mask of different types of node actions
+ * @bclink: broadcast-related info
+ * @list: links to adjacent nodes in sorted list of cluster's nodes
  * @working_links: number of working links to node (both active and standby)
- * @block_setup: bit mask of conditions preventing link establishment to node
  * @link_cnt: number of links to node
  * @signature: node instance identifier
- * @bclink: broadcast-related info
+ * @nsub: list of "node down" subscriptions monitoring node
  * @rcu: rcu struct for tipc_node
- *    @acked: sequence # of last outbound b'cast message acknowledged by node
- *    @last_in: sequence # of last in-sequence b'cast message received from node
- *    @last_sent: sequence # of last b'cast message sent by node
- *    @oos_state: state tracker for handling OOS b'cast messages
- *    @deferred_size: number of OOS b'cast messages in deferred queue
- *    @deferred_head: oldest OOS b'cast message received from node
- *    @deferred_tail: newest OOS b'cast message received from node
- *    @reasm_head: broadcast reassembly queue head from node
- *    @reasm_tail: last broadcast fragment received from node
- *    @recv_permitted: true if node is allowed to receive b'cast messages
  */
 struct tipc_node {
        u32 addr;
        spinlock_t lock;
        struct hlist_node hash;
-       struct list_head list;
-       struct list_head nsub;
        struct tipc_link *active_links[2];
        struct tipc_link *links[MAX_BEARERS];
+       unsigned int action_flags;
+       struct tipc_node_bclink bclink;
+       struct list_head list;
        int link_cnt;
        int working_links;
-       int block_setup;
        u32 signature;
+       struct list_head nsub;
        struct rcu_head rcu;
-       struct {
-               u32 acked;
-               u32 last_in;
-               u32 last_sent;
-               u32 oos_state;
-               u32 deferred_size;
-               struct sk_buff *deferred_head;
-               struct sk_buff *deferred_tail;
-               struct sk_buff *reasm_head;
-               struct sk_buff *reasm_tail;
-               bool recv_permitted;
-       } bclink;
 };
 
 extern struct list_head tipc_node_list;
@@ -118,15 +129,18 @@ int tipc_node_active_links(struct tipc_node *n_ptr);
 int tipc_node_is_up(struct tipc_node *n_ptr);
 struct sk_buff *tipc_node_get_links(const void *req_tlv_area, int req_tlv_space);
 struct sk_buff *tipc_node_get_nodes(const void *req_tlv_area, int req_tlv_space);
+int tipc_node_get_linkname(u32 bearer_id, u32 node, char *linkname, size_t len);
+void tipc_node_unlock(struct tipc_node *node);
 
-static inline void tipc_node_lock(struct tipc_node *n_ptr)
+static inline void tipc_node_lock(struct tipc_node *node)
 {
-       spin_lock_bh(&n_ptr->lock);
+       spin_lock_bh(&node->lock);
 }
 
-static inline void tipc_node_unlock(struct tipc_node *n_ptr)
+static inline bool tipc_node_blocked(struct tipc_node *node)
 {
-       spin_unlock_bh(&n_ptr->lock);
+       return (node->action_flags & (TIPC_WAIT_PEER_LINKS_DOWN |
+               TIPC_NOTIFY_NODE_DOWN | TIPC_WAIT_OWN_LINKS_DOWN));
 }
 
 #endif
index 8a7384c04add4bdc6db6ae4451ebb2232e4b338b..7c59ab1d6ecb3dc26c4efb77cd243a00341c7b5b 100644 (file)
@@ -81,14 +81,13 @@ void tipc_nodesub_unsubscribe(struct tipc_node_subscr *node_sub)
  *
  * Note: node is locked by caller
  */
-void tipc_nodesub_notify(struct tipc_node *node)
+void tipc_nodesub_notify(struct list_head *nsub_list)
 {
-       struct tipc_node_subscr *ns;
+       struct tipc_node_subscr *ns, *safe;
 
-       list_for_each_entry(ns, &node->nsub, nodesub_list) {
+       list_for_each_entry_safe(ns, safe, nsub_list, nodesub_list) {
                if (ns->handle_node_down) {
-                       tipc_k_signal((Handler)ns->handle_node_down,
-                                     (unsigned long)ns->usr_handle);
+                       ns->handle_node_down(ns->usr_handle);
                        ns->handle_node_down = NULL;
                }
        }
index c95d20727ded3ea70cf9532a82cd8ff20fde415d..d91b8cc81e3d786948bd4ed9ac622689c4218ea1 100644 (file)
@@ -58,6 +58,6 @@ struct tipc_node_subscr {
 void tipc_nodesub_subscribe(struct tipc_node_subscr *node_sub, u32 addr,
                            void *usr_handle, net_ev_handler handle_down);
 void tipc_nodesub_unsubscribe(struct tipc_node_subscr *node_sub);
-void tipc_nodesub_notify(struct tipc_node *node);
+void tipc_nodesub_notify(struct list_head *nsub_list);
 
 #endif
index 5c14c7801ee65095d809d502cab9f78d33d51e5e..5fd7acce01ea339b7ffe2873956e9513eb40bb49 100644 (file)
@@ -165,7 +165,7 @@ void tipc_port_mcast_rcv(struct sk_buff *buf, struct tipc_port_list *dp)
                msg_set_destnode(msg, tipc_own_addr);
                if (dp->count == 1) {
                        msg_set_destport(msg, dp->ports[0]);
-                       tipc_port_rcv(buf);
+                       tipc_sk_rcv(buf);
                        tipc_port_list_free(dp);
                        return;
                }
@@ -180,7 +180,7 @@ void tipc_port_mcast_rcv(struct sk_buff *buf, struct tipc_port_list *dp)
                        if ((index == 0) && (cnt != 0))
                                item = item->next;
                        msg_set_destport(buf_msg(b), item->ports[index]);
-                       tipc_port_rcv(b);
+                       tipc_sk_rcv(b);
                }
        }
 exit:
@@ -343,7 +343,7 @@ int tipc_reject_msg(struct sk_buff *buf, u32 err)
        /* send returned message & dispose of rejected message */
        src_node = msg_prevnode(msg);
        if (in_own_node(src_node))
-               tipc_port_rcv(rbuf);
+               tipc_sk_rcv(rbuf);
        else
                tipc_link_xmit(rbuf, src_node, msg_link_selector(rmsg));
 exit:
@@ -754,37 +754,6 @@ int tipc_port_shutdown(u32 ref)
        return tipc_port_disconnect(ref);
 }
 
-/**
- * tipc_port_rcv - receive message from lower layer and deliver to port user
- */
-int tipc_port_rcv(struct sk_buff *buf)
-{
-       struct tipc_port *p_ptr;
-       struct tipc_msg *msg = buf_msg(buf);
-       u32 destport = msg_destport(msg);
-       u32 dsz = msg_data_sz(msg);
-       u32 err;
-
-       /* forward unresolved named message */
-       if (unlikely(!destport)) {
-               tipc_net_route_msg(buf);
-               return dsz;
-       }
-
-       /* validate destination & pass to port, otherwise reject message */
-       p_ptr = tipc_port_lock(destport);
-       if (likely(p_ptr)) {
-               err = tipc_sk_rcv(&tipc_port_to_sock(p_ptr)->sk, buf);
-               tipc_port_unlock(p_ptr);
-               if (likely(!err))
-                       return dsz;
-       } else {
-               err = TIPC_ERR_NO_PORT;
-       }
-
-       return tipc_reject_msg(buf, err);
-}
-
 /*
  *  tipc_port_iovec_rcv: Concatenate and deliver sectioned
  *                       message for this node.
@@ -798,7 +767,7 @@ static int tipc_port_iovec_rcv(struct tipc_port *sender,
 
        res = tipc_msg_build(&sender->phdr, msg_sect, len, MAX_MSG_SIZE, &buf);
        if (likely(buf))
-               tipc_port_rcv(buf);
+               tipc_sk_rcv(buf);
        return res;
 }
 
index a00397393bd1d9179bd779339500e34bd710aa5b..cf4ca5b1d9a48ae7752f9f476cad079e3f115da8 100644 (file)
 #include "msg.h"
 #include "node_subscr.h"
 
-#define TIPC_FLOW_CONTROL_WIN 512
-#define CONN_OVERLOAD_LIMIT    ((TIPC_FLOW_CONTROL_WIN * 2 + 1) * \
-                               SKB_TRUESIZE(TIPC_MAX_USER_MSG_SIZE))
+#define TIPC_CONNACK_INTV         256
+#define TIPC_FLOWCTRL_WIN        (TIPC_CONNACK_INTV * 2)
+#define TIPC_CONN_OVERLOAD_LIMIT ((TIPC_FLOWCTRL_WIN * 2 + 1) * \
+                                 SKB_TRUESIZE(TIPC_MAX_USER_MSG_SIZE))
 
 /**
  * struct tipc_port - TIPC port structure
@@ -134,7 +135,6 @@ int tipc_port_peer_msg(struct tipc_port *p_ptr, struct tipc_msg *msg);
 /*
  * TIPC messaging routines
  */
-int tipc_port_rcv(struct sk_buff *buf);
 
 int tipc_send(struct tipc_port *port,
              struct iovec const *msg_sect,
@@ -187,7 +187,7 @@ static inline void tipc_port_unlock(struct tipc_port *p_ptr)
 
 static inline int tipc_port_congested(struct tipc_port *p_ptr)
 {
-       return (p_ptr->sent - p_ptr->acked) >= (TIPC_FLOW_CONTROL_WIN * 2);
+       return ((p_ptr->sent - p_ptr->acked) >= TIPC_FLOWCTRL_WIN);
 }
 
 
index 3c0256962f7dafa4ee3b11d69aed963822c888c2..ac08966f285867673b55b62a83d7f9beb57f2a4c 100644 (file)
@@ -1,5 +1,5 @@
 /*
- * net/tipc/socket.c: TIPC socket API
+* net/tipc/socket.c: TIPC socket API
  *
  * Copyright (c) 2001-2007, 2012-2014, Ericsson AB
  * Copyright (c) 2004-2008, 2010-2013, Wind River Systems
@@ -36,6 +36,7 @@
 
 #include "core.h"
 #include "port.h"
+#include "node.h"
 
 #include <linux/export.h>
 
@@ -44,7 +45,7 @@
 
 #define CONN_TIMEOUT_DEFAULT   8000    /* default connect timeout = 8s */
 
-static int backlog_rcv(struct sock *sk, struct sk_buff *skb);
+static int tipc_backlog_rcv(struct sock *sk, struct sk_buff *skb);
 static void tipc_data_ready(struct sock *sk);
 static void tipc_write_space(struct sock *sk);
 static int tipc_release(struct socket *sock);
@@ -195,11 +196,12 @@ static int tipc_sk_create(struct net *net, struct socket *sock,
        sock->state = state;
 
        sock_init_data(sock, sk);
-       sk->sk_backlog_rcv = backlog_rcv;
+       sk->sk_backlog_rcv = tipc_backlog_rcv;
        sk->sk_rcvbuf = sysctl_tipc_rmem[1];
        sk->sk_data_ready = tipc_data_ready;
        sk->sk_write_space = tipc_write_space;
-       tipc_sk(sk)->conn_timeout = CONN_TIMEOUT_DEFAULT;
+       tsk->conn_timeout = CONN_TIMEOUT_DEFAULT;
+       atomic_set(&tsk->dupl_rcvcnt, 0);
        tipc_port_unlock(port);
 
        if (sock->state == SS_READY) {
@@ -1100,7 +1102,7 @@ restart:
        /* Consume received message (optional) */
        if (likely(!(flags & MSG_PEEK))) {
                if ((sock->state != SS_READY) &&
-                   (++port->conn_unacked >= TIPC_FLOW_CONTROL_WIN))
+                   (++port->conn_unacked >= TIPC_CONNACK_INTV))
                        tipc_acknowledge(port->ref, port->conn_unacked);
                advance_rx_queue(sk);
        }
@@ -1209,7 +1211,7 @@ restart:
 
        /* Consume received message (optional) */
        if (likely(!(flags & MSG_PEEK))) {
-               if (unlikely(++port->conn_unacked >= TIPC_FLOW_CONTROL_WIN))
+               if (unlikely(++port->conn_unacked >= TIPC_CONNACK_INTV))
                        tipc_acknowledge(port->ref, port->conn_unacked);
                advance_rx_queue(sk);
        }
@@ -1415,7 +1417,7 @@ static u32 filter_rcv(struct sock *sk, struct sk_buff *buf)
 }
 
 /**
- * backlog_rcv - handle incoming message from backlog queue
+ * tipc_backlog_rcv - handle incoming message from backlog queue
  * @sk: socket
  * @buf: message
  *
@@ -1423,47 +1425,73 @@ static u32 filter_rcv(struct sock *sk, struct sk_buff *buf)
  *
  * Returns 0
  */
-static int backlog_rcv(struct sock *sk, struct sk_buff *buf)
+static int tipc_backlog_rcv(struct sock *sk, struct sk_buff *buf)
 {
        u32 res;
+       struct tipc_sock *tsk = tipc_sk(sk);
 
        res = filter_rcv(sk, buf);
-       if (res)
+       if (unlikely(res))
                tipc_reject_msg(buf, res);
+
+       if (atomic_read(&tsk->dupl_rcvcnt) < TIPC_CONN_OVERLOAD_LIMIT)
+               atomic_add(buf->truesize, &tsk->dupl_rcvcnt);
+
        return 0;
 }
 
 /**
  * tipc_sk_rcv - handle incoming message
- * @sk:  socket receiving message
- * @buf: message
- *
- * Called with port lock already taken.
- *
- * Returns TIPC error status code (TIPC_OK if message is not to be rejected)
+ * @buf: buffer containing arriving message
+ * Consumes buffer
+ * Returns 0 if success, or errno: -EHOSTUNREACH
  */
-u32 tipc_sk_rcv(struct sock *sk, struct sk_buff *buf)
+int tipc_sk_rcv(struct sk_buff *buf)
 {
-       u32 res;
+       struct tipc_sock *tsk;
+       struct tipc_port *port;
+       struct sock *sk;
+       u32 dport = msg_destport(buf_msg(buf));
+       int err = TIPC_OK;
+       uint limit;
 
-       /*
-        * Process message if socket is unlocked; otherwise add to backlog queue
-        *
-        * This code is based on sk_receive_skb(), but must be distinct from it
-        * since a TIPC-specific filter/reject mechanism is utilized
-        */
+       /* Forward unresolved named message */
+       if (unlikely(!dport)) {
+               tipc_net_route_msg(buf);
+               return 0;
+       }
+
+       /* Validate destination */
+       port = tipc_port_lock(dport);
+       if (unlikely(!port)) {
+               err = TIPC_ERR_NO_PORT;
+               goto exit;
+       }
+
+       tsk = tipc_port_to_sock(port);
+       sk = &tsk->sk;
+
+       /* Queue message */
        bh_lock_sock(sk);
+
        if (!sock_owned_by_user(sk)) {
-               res = filter_rcv(sk, buf);
+               err = filter_rcv(sk, buf);
        } else {
-               if (sk_add_backlog(sk, buf, rcvbuf_limit(sk, buf)))
-                       res = TIPC_ERR_OVERLOAD;
-               else
-                       res = TIPC_OK;
+               if (sk->sk_backlog.len == 0)
+                       atomic_set(&tsk->dupl_rcvcnt, 0);
+               limit = rcvbuf_limit(sk, buf) + atomic_read(&tsk->dupl_rcvcnt);
+               if (sk_add_backlog(sk, buf, limit))
+                       err = TIPC_ERR_OVERLOAD;
        }
+
        bh_unlock_sock(sk);
+       tipc_port_unlock(port);
 
-       return res;
+       if (likely(!err))
+               return 0;
+exit:
+       tipc_reject_msg(buf, err);
+       return -EHOSTUNREACH;
 }
 
 static int tipc_wait_for_connect(struct socket *sock, long *timeo_p)
@@ -1905,6 +1933,28 @@ static int tipc_getsockopt(struct socket *sock, int lvl, int opt,
        return put_user(sizeof(value), ol);
 }
 
+int tipc_ioctl(struct socket *sk, unsigned int cmd, unsigned long arg)
+{
+       struct tipc_sioc_ln_req lnr;
+       void __user *argp = (void __user *)arg;
+
+       switch (cmd) {
+       case SIOCGETLINKNAME:
+               if (copy_from_user(&lnr, argp, sizeof(lnr)))
+                       return -EFAULT;
+               if (!tipc_node_get_linkname(lnr.bearer_id, lnr.peer,
+                                           lnr.linkname, TIPC_MAX_LINK_NAME)) {
+                       if (copy_to_user(argp, &lnr, sizeof(lnr)))
+                               return -EFAULT;
+                       return 0;
+               }
+               return -EADDRNOTAVAIL;
+               break;
+       default:
+               return -ENOIOCTLCMD;
+       }
+}
+
 /* Protocol switches for the various types of TIPC sockets */
 
 static const struct proto_ops msg_ops = {
@@ -1917,7 +1967,7 @@ static const struct proto_ops msg_ops = {
        .accept         = sock_no_accept,
        .getname        = tipc_getname,
        .poll           = tipc_poll,
-       .ioctl          = sock_no_ioctl,
+       .ioctl          = tipc_ioctl,
        .listen         = sock_no_listen,
        .shutdown       = tipc_shutdown,
        .setsockopt     = tipc_setsockopt,
@@ -1938,7 +1988,7 @@ static const struct proto_ops packet_ops = {
        .accept         = tipc_accept,
        .getname        = tipc_getname,
        .poll           = tipc_poll,
-       .ioctl          = sock_no_ioctl,
+       .ioctl          = tipc_ioctl,
        .listen         = tipc_listen,
        .shutdown       = tipc_shutdown,
        .setsockopt     = tipc_setsockopt,
@@ -1959,7 +2009,7 @@ static const struct proto_ops stream_ops = {
        .accept         = tipc_accept,
        .getname        = tipc_getname,
        .poll           = tipc_poll,
-       .ioctl          = sock_no_ioctl,
+       .ioctl          = tipc_ioctl,
        .listen         = tipc_listen,
        .shutdown       = tipc_shutdown,
        .setsockopt     = tipc_setsockopt,
index 74e5c7f195a660d6a1e88d1b506cf4e7371566f1..3afcd2a70b313c21d67752606b839e613d3cf9df 100644 (file)
  * @port: port - interacts with 'sk' and with the rest of the TIPC stack
  * @peer_name: the peer of the connection, if any
  * @conn_timeout: the time we can wait for an unresponded setup request
+ * @dupl_rcvcnt: number of bytes counted twice, in both backlog and rcv queue
  */
 
 struct tipc_sock {
        struct sock sk;
        struct tipc_port port;
        unsigned int conn_timeout;
+       atomic_t dupl_rcvcnt;
 };
 
 static inline struct tipc_sock *tipc_sk(const struct sock *sk)
@@ -67,6 +69,6 @@ static inline void tipc_sock_wakeup(struct tipc_sock *tsk)
        tsk->sk.sk_write_space(&tsk->sk);
 }
 
-u32 tipc_sk_rcv(struct sock *sk, struct sk_buff *buf);
+int tipc_sk_rcv(struct sk_buff *buf);
 
 #endif
index bb7e8ba821f44014d65f3669e017b8c9cca54f53..7b9114e0a5b14949e11f312e34a8b5b26c5a5a39 100644 (file)
@@ -1492,10 +1492,14 @@ static int unix_dgram_sendmsg(struct kiocb *kiocb, struct socket *sock,
        if (len > sk->sk_sndbuf - 32)
                goto out;
 
-       if (len > SKB_MAX_ALLOC)
+       if (len > SKB_MAX_ALLOC) {
                data_len = min_t(size_t,
                                 len - SKB_MAX_ALLOC,
                                 MAX_SKB_FRAGS * PAGE_SIZE);
+               data_len = PAGE_ALIGN(data_len);
+
+               BUILD_BUG_ON(SKB_MAX_ALLOC < PAGE_SIZE);
+       }
 
        skb = sock_alloc_send_pskb(sk, len - data_len, data_len,
                                   msg->msg_flags & MSG_DONTWAIT, &err,
@@ -1670,6 +1674,8 @@ static int unix_stream_sendmsg(struct kiocb *kiocb, struct socket *sock,
 
                data_len = max_t(int, 0, size - SKB_MAX_HEAD(0));
 
+               data_len = min_t(size_t, size, PAGE_ALIGN(data_len));
+
                skb = sock_alloc_send_pskb(sk, size - data_len, data_len,
                                           msg->msg_flags & MSG_DONTWAIT, &err,
                                           get_order(UNIX_SKB_FRAGS_SZ));
index 5adfd94c5b85d3d48a6d48d3a4c7c2fa98526d8b..85d232bed87d21f3c23cd695b83defef5a6f22c1 100644 (file)
@@ -1925,9 +1925,23 @@ static struct miscdevice vsock_device = {
        .fops           = &vsock_device_ops,
 };
 
-static int __vsock_core_init(void)
+int __vsock_core_init(const struct vsock_transport *t, struct module *owner)
 {
-       int err;
+       int err = mutex_lock_interruptible(&vsock_register_mutex);
+
+       if (err)
+               return err;
+
+       if (transport) {
+               err = -EBUSY;
+               goto err_busy;
+       }
+
+       /* Transport must be the owner of the protocol so that it can't
+        * unload while there are open sockets.
+        */
+       vsock_proto.owner = owner;
+       transport = t;
 
        vsock_init_tables();
 
@@ -1951,36 +1965,19 @@ static int __vsock_core_init(void)
                goto err_unregister_proto;
        }
 
+       mutex_unlock(&vsock_register_mutex);
        return 0;
 
 err_unregister_proto:
        proto_unregister(&vsock_proto);
 err_misc_deregister:
        misc_deregister(&vsock_device);
-       return err;
-}
-
-int vsock_core_init(const struct vsock_transport *t)
-{
-       int retval = mutex_lock_interruptible(&vsock_register_mutex);
-       if (retval)
-               return retval;
-
-       if (transport) {
-               retval = -EBUSY;
-               goto out;
-       }
-
-       transport = t;
-       retval = __vsock_core_init();
-       if (retval)
-               transport = NULL;
-
-out:
+       transport = NULL;
+err_busy:
        mutex_unlock(&vsock_register_mutex);
-       return retval;
+       return err;
 }
-EXPORT_SYMBOL_GPL(vsock_core_init);
+EXPORT_SYMBOL_GPL(__vsock_core_init);
 
 void vsock_core_exit(void)
 {
@@ -2000,5 +1997,5 @@ EXPORT_SYMBOL_GPL(vsock_core_exit);
 
 MODULE_AUTHOR("VMware, Inc.");
 MODULE_DESCRIPTION("VMware Virtual Socket Family");
-MODULE_VERSION("1.0.0.0-k");
+MODULE_VERSION("1.0.1.0-k");
 MODULE_LICENSE("GPL v2");
index 16d08b39921071479456e2033c5e371d381175b7..405f3c4cf70ca3617a4101e1bad278b93d7ae1b7 100644 (file)
@@ -95,6 +95,43 @@ config CFG80211_CERTIFICATION_ONUS
          you are a wireless researcher and are working in a controlled
          and approved environment by your local regulatory agency.
 
+config CFG80211_REG_CELLULAR_HINTS
+       bool "cfg80211 regulatory support for cellular base station hints"
+       depends on CFG80211_CERTIFICATION_ONUS
+       ---help---
+         This option enables support for parsing regulatory hints
+         from cellular base stations. If enabled and at least one driver
+         claims support for parsing cellular base station hints the
+         regulatory core will allow and parse these regulatory hints.
+         The regulatory core will only apply these regulatory hints on
+         drivers that support this feature. You should only enable this
+         feature if you have tested and validated this feature on your
+         systems.
+
+config CFG80211_REG_RELAX_NO_IR
+       bool "cfg80211 support for NO_IR relaxation"
+       depends on CFG80211_CERTIFICATION_ONUS
+       ---help---
+        This option enables support for relaxation of the NO_IR flag for
+        situations that certain regulatory bodies have provided clarifications
+        on how relaxation can occur. This feature has an inherent dependency on
+        userspace features which must have been properly tested and as such is
+        not enabled by default.
+
+        A relaxation feature example is allowing the operation of a P2P group
+        owner (GO) on channels marked with NO_IR if there is an additional BSS
+        interface which is associated to an AP which userspace assumes or confirms
+        to be an authorized master, i.e., with radar detection support and DFS
+        capabilities. However, note that in order to not create daisy chain
+        scenarios, this relaxation is not allowed in cases that the BSS client
+        is associated to P2P GO and in addition the P2P GO instantiated on
+        a channel due to this relaxation should not allow connection from
+        non P2P clients.
+
+        The regulatory core will apply these relaxations only for drivers that
+        support this feature by declaring the appropriate channel flags and
+        capabilities in their registration flow.
+
 config CFG80211_DEFAULT_PS
        bool "enable powersave by default"
        depends on CFG80211
index 9c9501a35fb5c6a43a142132306998405e086774..84d686e2dbd04a0402a652995ec468ba6df06795 100644 (file)
@@ -326,28 +326,57 @@ static int cfg80211_get_chans_dfs_required(struct wiphy *wiphy,
 
 
 int cfg80211_chandef_dfs_required(struct wiphy *wiphy,
-                                 const struct cfg80211_chan_def *chandef)
+                                 const struct cfg80211_chan_def *chandef,
+                                 enum nl80211_iftype iftype)
 {
        int width;
-       int r;
+       int ret;
 
        if (WARN_ON(!cfg80211_chandef_valid(chandef)))
                return -EINVAL;
 
-       width = cfg80211_chandef_get_width(chandef);
-       if (width < 0)
-               return -EINVAL;
+       switch (iftype) {
+       case NL80211_IFTYPE_ADHOC:
+       case NL80211_IFTYPE_AP:
+       case NL80211_IFTYPE_P2P_GO:
+       case NL80211_IFTYPE_MESH_POINT:
+               width = cfg80211_chandef_get_width(chandef);
+               if (width < 0)
+                       return -EINVAL;
 
-       r = cfg80211_get_chans_dfs_required(wiphy, chandef->center_freq1,
-                                           width);
-       if (r)
-               return r;
+               ret = cfg80211_get_chans_dfs_required(wiphy,
+                                                     chandef->center_freq1,
+                                                     width);
+               if (ret < 0)
+                       return ret;
+               else if (ret > 0)
+                       return BIT(chandef->width);
 
-       if (!chandef->center_freq2)
-               return 0;
+               if (!chandef->center_freq2)
+                       return 0;
+
+               ret = cfg80211_get_chans_dfs_required(wiphy,
+                                                     chandef->center_freq2,
+                                                     width);
+               if (ret < 0)
+                       return ret;
+               else if (ret > 0)
+                       return BIT(chandef->width);
 
-       return cfg80211_get_chans_dfs_required(wiphy, chandef->center_freq2,
-                                              width);
+               break;
+       case NL80211_IFTYPE_STATION:
+       case NL80211_IFTYPE_P2P_CLIENT:
+       case NL80211_IFTYPE_MONITOR:
+       case NL80211_IFTYPE_AP_VLAN:
+       case NL80211_IFTYPE_WDS:
+       case NL80211_IFTYPE_P2P_DEVICE:
+       case NL80211_IFTYPE_UNSPECIFIED:
+               break;
+       case NUM_NL80211_IFTYPES:
+               WARN_ON(1);
+       }
+
+       return 0;
 }
 EXPORT_SYMBOL(cfg80211_chandef_dfs_required);
 
@@ -587,12 +616,14 @@ bool cfg80211_chandef_usable(struct wiphy *wiphy,
                width = 5;
                break;
        case NL80211_CHAN_WIDTH_10:
+               prohibited_flags |= IEEE80211_CHAN_NO_10MHZ;
                width = 10;
                break;
        case NL80211_CHAN_WIDTH_20:
                if (!ht_cap->ht_supported)
                        return false;
        case NL80211_CHAN_WIDTH_20_NOHT:
+               prohibited_flags |= IEEE80211_CHAN_NO_20MHZ;
                width = 20;
                break;
        case NL80211_CHAN_WIDTH_40:
@@ -661,17 +692,112 @@ bool cfg80211_chandef_usable(struct wiphy *wiphy,
 }
 EXPORT_SYMBOL(cfg80211_chandef_usable);
 
+/*
+ * For GO only, check if the channel can be used under permissive conditions
+ * mandated by some regulatory bodies, i.e., the channel is marked with
+ * IEEE80211_CHAN_GO_CONCURRENT and there is an additional station interface
+ * associated to an AP on the same channel or on the same UNII band
+ * (assuming that the AP is an authorized master).
+ * In addition allow the GO to operate on a channel on which indoor operation is
+ * allowed, iff we are currently operating in an indoor environment.
+ */
+static bool cfg80211_go_permissive_chan(struct cfg80211_registered_device *rdev,
+                                       struct ieee80211_channel *chan)
+{
+       struct wireless_dev *wdev_iter;
+       struct wiphy *wiphy = wiphy_idx_to_wiphy(rdev->wiphy_idx);
+
+       ASSERT_RTNL();
+
+       if (!config_enabled(CONFIG_CFG80211_REG_RELAX_NO_IR) ||
+           !(wiphy->regulatory_flags & REGULATORY_ENABLE_RELAX_NO_IR))
+               return false;
+
+       if (regulatory_indoor_allowed() &&
+           (chan->flags & IEEE80211_CHAN_INDOOR_ONLY))
+               return true;
+
+       if (!(chan->flags & IEEE80211_CHAN_GO_CONCURRENT))
+               return false;
+
+       /*
+        * Generally, it is possible to rely on another device/driver to allow
+        * the GO concurrent relaxation, however, since the device can further
+        * enforce the relaxation (by doing similar verifications as this),
+        * and thus fail the GO instantiation, consider only the interfaces of
+        * the current registered device.
+        */
+       list_for_each_entry(wdev_iter, &rdev->wdev_list, list) {
+               struct ieee80211_channel *other_chan = NULL;
+               int r1, r2;
+
+               if (wdev_iter->iftype != NL80211_IFTYPE_STATION ||
+                   !netif_running(wdev_iter->netdev))
+                       continue;
+
+               wdev_lock(wdev_iter);
+               if (wdev_iter->current_bss)
+                       other_chan = wdev_iter->current_bss->pub.channel;
+               wdev_unlock(wdev_iter);
+
+               if (!other_chan)
+                       continue;
+
+               if (chan == other_chan)
+                       return true;
+
+               if (chan->band != IEEE80211_BAND_5GHZ)
+                       continue;
+
+               r1 = cfg80211_get_unii(chan->center_freq);
+               r2 = cfg80211_get_unii(other_chan->center_freq);
+
+               if (r1 != -EINVAL && r1 == r2) {
+                       /*
+                        * At some locations channels 149-165 are considered a
+                        * bundle, but at other locations, e.g., Indonesia,
+                        * channels 149-161 are considered a bundle while
+                        * channel 165 is left out and considered to be in a
+                        * different bundle. Thus, in case that there is a
+                        * station interface connected to an AP on channel 165,
+                        * it is assumed that channels 149-161 are allowed for
+                        * GO operations. However, having a station interface
+                        * connected to an AP on channels 149-161, does not
+                        * allow GO operation on channel 165.
+                        */
+                       if (chan->center_freq == 5825 &&
+                           other_chan->center_freq != 5825)
+                               continue;
+                       return true;
+               }
+       }
+
+       return false;
+}
+
 bool cfg80211_reg_can_beacon(struct wiphy *wiphy,
-                            struct cfg80211_chan_def *chandef)
+                            struct cfg80211_chan_def *chandef,
+                            enum nl80211_iftype iftype)
 {
+       struct cfg80211_registered_device *rdev = wiphy_to_rdev(wiphy);
        bool res;
        u32 prohibited_flags = IEEE80211_CHAN_DISABLED |
-                              IEEE80211_CHAN_NO_IR |
                               IEEE80211_CHAN_RADAR;
 
-       trace_cfg80211_reg_can_beacon(wiphy, chandef);
+       trace_cfg80211_reg_can_beacon(wiphy, chandef, iftype);
 
-       if (cfg80211_chandef_dfs_required(wiphy, chandef) > 0 &&
+       /*
+        * Under certain conditions suggested by some regulatory bodies
+        * a GO can operate on channels marked with IEEE80211_CHAN_NO_IR
+        * so set this flag only if such relaxations are not enabled and
+        * the conditions are not met.
+        */
+       if (iftype != NL80211_IFTYPE_P2P_GO ||
+           !cfg80211_go_permissive_chan(rdev, chandef->chan))
+               prohibited_flags |= IEEE80211_CHAN_NO_IR;
+
+       if (cfg80211_chandef_dfs_required(wiphy, chandef,
+                                         NL80211_IFTYPE_UNSPECIFIED) > 0 &&
            cfg80211_chandef_dfs_available(wiphy, chandef)) {
                /* We can skip IEEE80211_CHAN_NO_IR if chandef dfs available */
                prohibited_flags = IEEE80211_CHAN_DISABLED;
@@ -701,6 +827,8 @@ cfg80211_get_chan_state(struct wireless_dev *wdev,
                        enum cfg80211_chan_mode *chanmode,
                        u8 *radar_detect)
 {
+       int ret;
+
        *chan = NULL;
        *chanmode = CHAN_MODE_UNDEFINED;
 
@@ -743,8 +871,11 @@ cfg80211_get_chan_state(struct wireless_dev *wdev,
                        *chan = wdev->chandef.chan;
                        *chanmode = CHAN_MODE_SHARED;
 
-                       if (cfg80211_chandef_dfs_required(wdev->wiphy,
-                                                         &wdev->chandef))
+                       ret = cfg80211_chandef_dfs_required(wdev->wiphy,
+                                                           &wdev->chandef,
+                                                           wdev->iftype);
+                       WARN_ON(ret < 0);
+                       if (ret > 0)
                                *radar_detect |= BIT(wdev->chandef.width);
                }
                return;
@@ -753,8 +884,11 @@ cfg80211_get_chan_state(struct wireless_dev *wdev,
                        *chan = wdev->chandef.chan;
                        *chanmode = CHAN_MODE_SHARED;
 
-                       if (cfg80211_chandef_dfs_required(wdev->wiphy,
-                                                         &wdev->chandef))
+                       ret = cfg80211_chandef_dfs_required(wdev->wiphy,
+                                                           &wdev->chandef,
+                                                           wdev->iftype);
+                       WARN_ON(ret < 0);
+                       if (ret > 0)
                                *radar_detect |= BIT(wdev->chandef.width);
                }
                return;
index 086cddd03ba6edd79d1609ecf713146bc756c1ff..b3ff3697239a47b02d4008911c4c12bf7651b769 100644 (file)
@@ -69,7 +69,7 @@ struct cfg80211_registered_device *cfg80211_rdev_by_wiphy_idx(int wiphy_idx)
 
 int get_wiphy_idx(struct wiphy *wiphy)
 {
-       struct cfg80211_registered_device *rdev = wiphy_to_dev(wiphy);
+       struct cfg80211_registered_device *rdev = wiphy_to_rdev(wiphy);
 
        return rdev->wiphy_idx;
 }
@@ -260,6 +260,45 @@ static void cfg80211_event_work(struct work_struct *work)
        rtnl_unlock();
 }
 
+void cfg80211_destroy_ifaces(struct cfg80211_registered_device *rdev)
+{
+       struct cfg80211_iface_destroy *item;
+
+       ASSERT_RTNL();
+
+       spin_lock_irq(&rdev->destroy_list_lock);
+       while ((item = list_first_entry_or_null(&rdev->destroy_list,
+                                               struct cfg80211_iface_destroy,
+                                               list))) {
+               struct wireless_dev *wdev, *tmp;
+               u32 nlportid = item->nlportid;
+
+               list_del(&item->list);
+               kfree(item);
+               spin_unlock_irq(&rdev->destroy_list_lock);
+
+               list_for_each_entry_safe(wdev, tmp, &rdev->wdev_list, list) {
+                       if (nlportid == wdev->owner_nlportid)
+                               rdev_del_virtual_intf(rdev, wdev);
+               }
+
+               spin_lock_irq(&rdev->destroy_list_lock);
+       }
+       spin_unlock_irq(&rdev->destroy_list_lock);
+}
+
+static void cfg80211_destroy_iface_wk(struct work_struct *work)
+{
+       struct cfg80211_registered_device *rdev;
+
+       rdev = container_of(work, struct cfg80211_registered_device,
+                           destroy_work);
+
+       rtnl_lock();
+       cfg80211_destroy_ifaces(rdev);
+       rtnl_unlock();
+}
+
 /* exported functions */
 
 struct wiphy *wiphy_new(const struct cfg80211_ops *ops, int sizeof_priv)
@@ -318,6 +357,10 @@ struct wiphy *wiphy_new(const struct cfg80211_ops *ops, int sizeof_priv)
        rdev->wiphy.dev.class = &ieee80211_class;
        rdev->wiphy.dev.platform_data = rdev;
 
+       INIT_LIST_HEAD(&rdev->destroy_list);
+       spin_lock_init(&rdev->destroy_list_lock);
+       INIT_WORK(&rdev->destroy_work, cfg80211_destroy_iface_wk);
+
 #ifdef CONFIG_CFG80211_DEFAULT_PS
        rdev->wiphy.flags |= WIPHY_FLAG_PS_ON_BY_DEFAULT;
 #endif
@@ -396,10 +439,7 @@ static int wiphy_verify_combinations(struct wiphy *wiphy)
                for (j = 0; j < c->n_limits; j++) {
                        u16 types = c->limits[j].types;
 
-                       /*
-                        * interface types shouldn't overlap, this is
-                        * used in cfg80211_can_change_interface()
-                        */
+                       /* interface types shouldn't overlap */
                        if (WARN_ON(types & all_iftypes))
                                return -EINVAL;
                        all_iftypes |= types;
@@ -435,7 +475,7 @@ static int wiphy_verify_combinations(struct wiphy *wiphy)
 
 int wiphy_register(struct wiphy *wiphy)
 {
-       struct cfg80211_registered_device *rdev = wiphy_to_dev(wiphy);
+       struct cfg80211_registered_device *rdev = wiphy_to_rdev(wiphy);
        int res;
        enum ieee80211_band band;
        struct ieee80211_supported_band *sband;
@@ -616,7 +656,7 @@ EXPORT_SYMBOL(wiphy_register);
 
 void wiphy_rfkill_start_polling(struct wiphy *wiphy)
 {
-       struct cfg80211_registered_device *rdev = wiphy_to_dev(wiphy);
+       struct cfg80211_registered_device *rdev = wiphy_to_rdev(wiphy);
 
        if (!rdev->ops->rfkill_poll)
                return;
@@ -627,7 +667,7 @@ EXPORT_SYMBOL(wiphy_rfkill_start_polling);
 
 void wiphy_rfkill_stop_polling(struct wiphy *wiphy)
 {
-       struct cfg80211_registered_device *rdev = wiphy_to_dev(wiphy);
+       struct cfg80211_registered_device *rdev = wiphy_to_rdev(wiphy);
 
        rfkill_pause_polling(rdev->rfkill);
 }
@@ -635,7 +675,7 @@ EXPORT_SYMBOL(wiphy_rfkill_stop_polling);
 
 void wiphy_unregister(struct wiphy *wiphy)
 {
-       struct cfg80211_registered_device *rdev = wiphy_to_dev(wiphy);
+       struct cfg80211_registered_device *rdev = wiphy_to_rdev(wiphy);
 
        wait_event(rdev->dev_wait, ({
                int __count;
@@ -675,6 +715,7 @@ void wiphy_unregister(struct wiphy *wiphy)
        cancel_work_sync(&rdev->conn_work);
        flush_work(&rdev->event_work);
        cancel_delayed_work_sync(&rdev->dfs_update_channels_wk);
+       flush_work(&rdev->destroy_work);
 
 #ifdef CONFIG_PM
        if (rdev->wiphy.wowlan_config && rdev->ops->set_wakeup)
@@ -707,7 +748,7 @@ EXPORT_SYMBOL(wiphy_free);
 
 void wiphy_rfkill_set_hw_state(struct wiphy *wiphy, bool blocked)
 {
-       struct cfg80211_registered_device *rdev = wiphy_to_dev(wiphy);
+       struct cfg80211_registered_device *rdev = wiphy_to_rdev(wiphy);
 
        if (rfkill_set_hw_state(rdev->rfkill, blocked))
                schedule_work(&rdev->rfkill_sync);
@@ -716,7 +757,7 @@ EXPORT_SYMBOL(wiphy_rfkill_set_hw_state);
 
 void cfg80211_unregister_wdev(struct wireless_dev *wdev)
 {
-       struct cfg80211_registered_device *rdev = wiphy_to_dev(wdev->wiphy);
+       struct cfg80211_registered_device *rdev = wiphy_to_rdev(wdev->wiphy);
 
        ASSERT_RTNL();
 
@@ -796,12 +837,11 @@ static int cfg80211_netdev_notifier_call(struct notifier_block *nb,
        struct net_device *dev = netdev_notifier_info_to_dev(ptr);
        struct wireless_dev *wdev = dev->ieee80211_ptr;
        struct cfg80211_registered_device *rdev;
-       int ret;
 
        if (!wdev)
                return NOTIFY_DONE;
 
-       rdev = wiphy_to_dev(wdev->wiphy);
+       rdev = wiphy_to_rdev(wdev->wiphy);
 
        WARN_ON(wdev->iftype == NL80211_IFTYPE_UNSPECIFIED);
 
@@ -959,13 +999,14 @@ static int cfg80211_netdev_notifier_call(struct notifier_block *nb,
        case NETDEV_PRE_UP:
                if (!(wdev->wiphy->interface_modes & BIT(wdev->iftype)))
                        return notifier_from_errno(-EOPNOTSUPP);
-               ret = cfg80211_can_add_interface(rdev, wdev->iftype);
-               if (ret)
-                       return notifier_from_errno(ret);
+               if (rfkill_blocked(rdev->rfkill))
+                       return notifier_from_errno(-ERFKILL);
                break;
+       default:
+               return NOTIFY_DONE;
        }
 
-       return NOTIFY_DONE;
+       return NOTIFY_OK;
 }
 
 static struct notifier_block cfg80211_netdev_notifier = {
index 5b1fdcadd46985548f4a04f4f64ddaccbc935661..681b8fa4355b09bdc78796388986deeb9022ea89 100644 (file)
@@ -80,13 +80,17 @@ struct cfg80211_registered_device {
 
        struct cfg80211_coalesce *coalesce;
 
+       spinlock_t destroy_list_lock;
+       struct list_head destroy_list;
+       struct work_struct destroy_work;
+
        /* must be last because of the way we do wiphy_priv(),
         * and it should at least be aligned to NETDEV_ALIGN */
        struct wiphy wiphy __aligned(NETDEV_ALIGN);
 };
 
 static inline
-struct cfg80211_registered_device *wiphy_to_dev(struct wiphy *wiphy)
+struct cfg80211_registered_device *wiphy_to_rdev(struct wiphy *wiphy)
 {
        BUG_ON(!wiphy);
        return container_of(wiphy, struct cfg80211_registered_device, wiphy);
@@ -232,6 +236,13 @@ struct cfg80211_beacon_registration {
        u32 nlportid;
 };
 
+struct cfg80211_iface_destroy {
+       struct list_head list;
+       u32 nlportid;
+};
+
+void cfg80211_destroy_ifaces(struct cfg80211_registered_device *rdev);
+
 /* free object */
 void cfg80211_dev_free(struct cfg80211_registered_device *rdev);
 
@@ -240,8 +251,8 @@ int cfg80211_dev_rename(struct cfg80211_registered_device *rdev,
 
 void ieee80211_set_bitrate_flags(struct wiphy *wiphy);
 
-void cfg80211_bss_expire(struct cfg80211_registered_device *dev);
-void cfg80211_bss_age(struct cfg80211_registered_device *dev,
+void cfg80211_bss_expire(struct cfg80211_registered_device *rdev);
+void cfg80211_bss_age(struct cfg80211_registered_device *rdev,
                       unsigned long age_secs);
 
 /* IBSS */
@@ -401,35 +412,6 @@ unsigned int
 cfg80211_chandef_dfs_cac_time(struct wiphy *wiphy,
                              const struct cfg80211_chan_def *chandef);
 
-static inline int
-cfg80211_can_change_interface(struct cfg80211_registered_device *rdev,
-                             struct wireless_dev *wdev,
-                             enum nl80211_iftype iftype)
-{
-       return cfg80211_can_use_iftype_chan(rdev, wdev, iftype, NULL,
-                                           CHAN_MODE_UNDEFINED, 0);
-}
-
-static inline int
-cfg80211_can_add_interface(struct cfg80211_registered_device *rdev,
-                          enum nl80211_iftype iftype)
-{
-       if (rfkill_blocked(rdev->rfkill))
-               return -ERFKILL;
-
-       return cfg80211_can_change_interface(rdev, NULL, iftype);
-}
-
-static inline int
-cfg80211_can_use_chan(struct cfg80211_registered_device *rdev,
-                     struct wireless_dev *wdev,
-                     struct ieee80211_channel *chan,
-                     enum cfg80211_chan_mode chanmode)
-{
-       return cfg80211_can_use_iftype_chan(rdev, wdev, wdev->iftype,
-                                           chan, chanmode, 0);
-}
-
 static inline unsigned int elapsed_jiffies_msecs(unsigned long start)
 {
        unsigned long end = jiffies;
index e37862f1b1270d8e2056fb6289f01bf69b583720..d4860bfc020e5a1c43758e8cca6e9508908344be 100644 (file)
@@ -43,7 +43,7 @@ static void cfg80211_get_ringparam(struct net_device *dev,
                                   struct ethtool_ringparam *rp)
 {
        struct wireless_dev *wdev = dev->ieee80211_ptr;
-       struct cfg80211_registered_device *rdev = wiphy_to_dev(wdev->wiphy);
+       struct cfg80211_registered_device *rdev = wiphy_to_rdev(wdev->wiphy);
 
        memset(rp, 0, sizeof(*rp));
 
@@ -56,7 +56,7 @@ static int cfg80211_set_ringparam(struct net_device *dev,
                                  struct ethtool_ringparam *rp)
 {
        struct wireless_dev *wdev = dev->ieee80211_ptr;
-       struct cfg80211_registered_device *rdev = wiphy_to_dev(wdev->wiphy);
+       struct cfg80211_registered_device *rdev = wiphy_to_rdev(wdev->wiphy);
 
        if (rp->rx_mini_pending != 0 || rp->rx_jumbo_pending != 0)
                return -EINVAL;
@@ -70,7 +70,7 @@ static int cfg80211_set_ringparam(struct net_device *dev,
 static int cfg80211_get_sset_count(struct net_device *dev, int sset)
 {
        struct wireless_dev *wdev = dev->ieee80211_ptr;
-       struct cfg80211_registered_device *rdev = wiphy_to_dev(wdev->wiphy);
+       struct cfg80211_registered_device *rdev = wiphy_to_rdev(wdev->wiphy);
        if (rdev->ops->get_et_sset_count)
                return rdev_get_et_sset_count(rdev, dev, sset);
        return -EOPNOTSUPP;
@@ -80,7 +80,7 @@ static void cfg80211_get_stats(struct net_device *dev,
                               struct ethtool_stats *stats, u64 *data)
 {
        struct wireless_dev *wdev = dev->ieee80211_ptr;
-       struct cfg80211_registered_device *rdev = wiphy_to_dev(wdev->wiphy);
+       struct cfg80211_registered_device *rdev = wiphy_to_rdev(wdev->wiphy);
        if (rdev->ops->get_et_stats)
                rdev_get_et_stats(rdev, dev, stats, data);
 }
@@ -88,7 +88,7 @@ static void cfg80211_get_stats(struct net_device *dev,
 static void cfg80211_get_strings(struct net_device *dev, u32 sset, u8 *data)
 {
        struct wireless_dev *wdev = dev->ieee80211_ptr;
-       struct cfg80211_registered_device *rdev = wiphy_to_dev(wdev->wiphy);
+       struct cfg80211_registered_device *rdev = wiphy_to_rdev(wdev->wiphy);
        if (rdev->ops->get_et_strings)
                rdev_get_et_strings(rdev, dev, sset, data);
 }
index a6b5bdad039c7450f276d1e56994661e2e41952c..6b50588b709f18658d6207f0df69b672f0e9a887 100644 (file)
@@ -45,7 +45,7 @@ void __cfg80211_ibss_joined(struct net_device *dev, const u8 *bssid,
 
        cfg80211_upload_connect_keys(wdev);
 
-       nl80211_send_ibss_bssid(wiphy_to_dev(wdev->wiphy), dev, bssid,
+       nl80211_send_ibss_bssid(wiphy_to_rdev(wdev->wiphy), dev, bssid,
                                GFP_KERNEL);
 #ifdef CONFIG_CFG80211_WEXT
        memset(&wrqu, 0, sizeof(wrqu));
@@ -58,7 +58,7 @@ void cfg80211_ibss_joined(struct net_device *dev, const u8 *bssid,
                          struct ieee80211_channel *channel, gfp_t gfp)
 {
        struct wireless_dev *wdev = dev->ieee80211_ptr;
-       struct cfg80211_registered_device *rdev = wiphy_to_dev(wdev->wiphy);
+       struct cfg80211_registered_device *rdev = wiphy_to_rdev(wdev->wiphy);
        struct cfg80211_event *ev;
        unsigned long flags;
 
@@ -88,8 +88,6 @@ static int __cfg80211_join_ibss(struct cfg80211_registered_device *rdev,
                                struct cfg80211_cached_keys *connkeys)
 {
        struct wireless_dev *wdev = dev->ieee80211_ptr;
-       struct ieee80211_channel *check_chan;
-       u8 radar_detect_width = 0;
        int err;
 
        ASSERT_WDEV_LOCK(wdev);
@@ -126,28 +124,6 @@ static int __cfg80211_join_ibss(struct cfg80211_registered_device *rdev,
 #ifdef CONFIG_CFG80211_WEXT
        wdev->wext.ibss.chandef = params->chandef;
 #endif
-       check_chan = params->chandef.chan;
-       if (params->userspace_handles_dfs) {
-               /* Check for radar even if the current channel is not
-                * a radar channel - it might decide to change to DFS
-                * channel later.
-                */
-               radar_detect_width = BIT(params->chandef.width);
-       }
-
-       err = cfg80211_can_use_iftype_chan(rdev, wdev, wdev->iftype,
-                                          check_chan,
-                                          (params->channel_fixed &&
-                                           !radar_detect_width)
-                                          ? CHAN_MODE_SHARED
-                                          : CHAN_MODE_EXCLUSIVE,
-                                          radar_detect_width);
-
-       if (err) {
-               wdev->connect_keys = NULL;
-               return err;
-       }
-
        err = rdev_join_ibss(rdev, dev, params);
        if (err) {
                wdev->connect_keys = NULL;
@@ -180,7 +156,7 @@ int cfg80211_join_ibss(struct cfg80211_registered_device *rdev,
 static void __cfg80211_clear_ibss(struct net_device *dev, bool nowext)
 {
        struct wireless_dev *wdev = dev->ieee80211_ptr;
-       struct cfg80211_registered_device *rdev = wiphy_to_dev(wdev->wiphy);
+       struct cfg80211_registered_device *rdev = wiphy_to_rdev(wdev->wiphy);
        int i;
 
        ASSERT_WDEV_LOCK(wdev);
@@ -335,7 +311,7 @@ int cfg80211_ibss_wext_siwfreq(struct net_device *dev,
                               struct iw_freq *wextfreq, char *extra)
 {
        struct wireless_dev *wdev = dev->ieee80211_ptr;
-       struct cfg80211_registered_device *rdev = wiphy_to_dev(wdev->wiphy);
+       struct cfg80211_registered_device *rdev = wiphy_to_rdev(wdev->wiphy);
        struct ieee80211_channel *chan = NULL;
        int err, freq;
 
@@ -346,7 +322,7 @@ int cfg80211_ibss_wext_siwfreq(struct net_device *dev,
        if (!rdev->ops->join_ibss)
                return -EOPNOTSUPP;
 
-       freq = cfg80211_wext_freq(wdev->wiphy, wextfreq);
+       freq = cfg80211_wext_freq(wextfreq);
        if (freq < 0)
                return freq;
 
@@ -420,7 +396,7 @@ int cfg80211_ibss_wext_siwessid(struct net_device *dev,
                                struct iw_point *data, char *ssid)
 {
        struct wireless_dev *wdev = dev->ieee80211_ptr;
-       struct cfg80211_registered_device *rdev = wiphy_to_dev(wdev->wiphy);
+       struct cfg80211_registered_device *rdev = wiphy_to_rdev(wdev->wiphy);
        size_t len = data->length;
        int err;
 
@@ -487,7 +463,7 @@ int cfg80211_ibss_wext_siwap(struct net_device *dev,
                             struct sockaddr *ap_addr, char *extra)
 {
        struct wireless_dev *wdev = dev->ieee80211_ptr;
-       struct cfg80211_registered_device *rdev = wiphy_to_dev(wdev->wiphy);
+       struct cfg80211_registered_device *rdev = wiphy_to_rdev(wdev->wiphy);
        u8 *bssid = ap_addr->sa_data;
        int err;
 
@@ -505,6 +481,9 @@ int cfg80211_ibss_wext_siwap(struct net_device *dev,
        if (is_zero_ether_addr(bssid) || is_broadcast_ether_addr(bssid))
                bssid = NULL;
 
+       if (bssid && !is_valid_ether_addr(bssid))
+               return -EINVAL;
+
        /* both automatic */
        if (!bssid && !wdev->wext.ibss.bssid)
                return 0;
index 5af5cc6b2c4c2406475a3063a69eef80cc14691f..3ddfb7cd335e6a8740109e51355070310db6ecb3 100644 (file)
@@ -99,7 +99,6 @@ int __cfg80211_join_mesh(struct cfg80211_registered_device *rdev,
                         const struct mesh_config *conf)
 {
        struct wireless_dev *wdev = dev->ieee80211_ptr;
-       u8 radar_detect_width = 0;
        int err;
 
        BUILD_BUG_ON(IEEE80211_MAX_SSID_LEN != IEEE80211_MAX_MESH_ID_LEN);
@@ -175,22 +174,10 @@ int __cfg80211_join_mesh(struct cfg80211_registered_device *rdev,
                                                               scan_width);
        }
 
-       if (!cfg80211_reg_can_beacon(&rdev->wiphy, &setup->chandef))
+       if (!cfg80211_reg_can_beacon(&rdev->wiphy, &setup->chandef,
+                                    NL80211_IFTYPE_MESH_POINT))
                return -EINVAL;
 
-       err = cfg80211_chandef_dfs_required(wdev->wiphy, &setup->chandef);
-       if (err < 0)
-               return err;
-       if (err)
-               radar_detect_width = BIT(setup->chandef.width);
-
-       err = cfg80211_can_use_iftype_chan(rdev, wdev, wdev->iftype,
-                                          setup->chandef.chan,
-                                          CHAN_MODE_SHARED,
-                                          radar_detect_width);
-       if (err)
-               return err;
-
        err = rdev_join_mesh(rdev, dev, conf, setup);
        if (!err) {
                memcpy(wdev->ssid, setup->mesh_id, setup->mesh_id_len);
@@ -236,17 +223,6 @@ int cfg80211_set_mesh_channel(struct cfg80211_registered_device *rdev,
                if (!netif_running(wdev->netdev))
                        return -ENETDOWN;
 
-               /* cfg80211_can_use_chan() calls
-                * cfg80211_can_use_iftype_chan() with no radar
-                * detection, so if we're trying to use a radar
-                * channel here, something is wrong.
-                */
-               WARN_ON_ONCE(chandef->chan->flags & IEEE80211_CHAN_RADAR);
-               err = cfg80211_can_use_chan(rdev, wdev, chandef->chan,
-                                           CHAN_MODE_SHARED);
-               if (err)
-                       return err;
-
                err = rdev_libertas_set_mesh_channel(rdev, wdev->netdev,
                                                     chandef->chan);
                if (!err)
index c52ff59a3e96d7cabb892bff220b86c580069a43..266766b8d80b61455565cc43779a6e229ed7710d 100644 (file)
@@ -23,7 +23,7 @@ void cfg80211_rx_assoc_resp(struct net_device *dev, struct cfg80211_bss *bss,
 {
        struct wireless_dev *wdev = dev->ieee80211_ptr;
        struct wiphy *wiphy = wdev->wiphy;
-       struct cfg80211_registered_device *rdev = wiphy_to_dev(wiphy);
+       struct cfg80211_registered_device *rdev = wiphy_to_rdev(wiphy);
        struct ieee80211_mgmt *mgmt = (struct ieee80211_mgmt *)buf;
        u8 *ie = mgmt->u.assoc_resp.variable;
        int ieoffs = offsetof(struct ieee80211_mgmt, u.assoc_resp.variable);
@@ -54,7 +54,7 @@ EXPORT_SYMBOL(cfg80211_rx_assoc_resp);
 static void cfg80211_process_auth(struct wireless_dev *wdev,
                                  const u8 *buf, size_t len)
 {
-       struct cfg80211_registered_device *rdev = wiphy_to_dev(wdev->wiphy);
+       struct cfg80211_registered_device *rdev = wiphy_to_rdev(wdev->wiphy);
 
        nl80211_send_rx_auth(rdev, wdev->netdev, buf, len, GFP_KERNEL);
        cfg80211_sme_rx_auth(wdev, buf, len);
@@ -63,7 +63,7 @@ static void cfg80211_process_auth(struct wireless_dev *wdev,
 static void cfg80211_process_deauth(struct wireless_dev *wdev,
                                    const u8 *buf, size_t len)
 {
-       struct cfg80211_registered_device *rdev = wiphy_to_dev(wdev->wiphy);
+       struct cfg80211_registered_device *rdev = wiphy_to_rdev(wdev->wiphy);
        struct ieee80211_mgmt *mgmt = (struct ieee80211_mgmt *)buf;
        const u8 *bssid = mgmt->bssid;
        u16 reason_code = le16_to_cpu(mgmt->u.deauth.reason_code);
@@ -82,7 +82,7 @@ static void cfg80211_process_deauth(struct wireless_dev *wdev,
 static void cfg80211_process_disassoc(struct wireless_dev *wdev,
                                      const u8 *buf, size_t len)
 {
-       struct cfg80211_registered_device *rdev = wiphy_to_dev(wdev->wiphy);
+       struct cfg80211_registered_device *rdev = wiphy_to_rdev(wdev->wiphy);
        struct ieee80211_mgmt *mgmt = (struct ieee80211_mgmt *)buf;
        const u8 *bssid = mgmt->bssid;
        u16 reason_code = le16_to_cpu(mgmt->u.disassoc.reason_code);
@@ -123,7 +123,7 @@ void cfg80211_auth_timeout(struct net_device *dev, const u8 *addr)
 {
        struct wireless_dev *wdev = dev->ieee80211_ptr;
        struct wiphy *wiphy = wdev->wiphy;
-       struct cfg80211_registered_device *rdev = wiphy_to_dev(wiphy);
+       struct cfg80211_registered_device *rdev = wiphy_to_rdev(wiphy);
 
        trace_cfg80211_send_auth_timeout(dev, addr);
 
@@ -136,7 +136,7 @@ void cfg80211_assoc_timeout(struct net_device *dev, struct cfg80211_bss *bss)
 {
        struct wireless_dev *wdev = dev->ieee80211_ptr;
        struct wiphy *wiphy = wdev->wiphy;
-       struct cfg80211_registered_device *rdev = wiphy_to_dev(wiphy);
+       struct cfg80211_registered_device *rdev = wiphy_to_rdev(wiphy);
 
        trace_cfg80211_send_assoc_timeout(dev, bss->bssid);
 
@@ -172,7 +172,7 @@ void cfg80211_michael_mic_failure(struct net_device *dev, const u8 *addr,
                                  const u8 *tsc, gfp_t gfp)
 {
        struct wiphy *wiphy = dev->ieee80211_ptr->wiphy;
-       struct cfg80211_registered_device *rdev = wiphy_to_dev(wiphy);
+       struct cfg80211_registered_device *rdev = wiphy_to_rdev(wiphy);
 #ifdef CONFIG_CFG80211_WEXT
        union iwreq_data wrqu;
        char *buf = kmalloc(128, gfp);
@@ -233,14 +233,8 @@ int cfg80211_mlme_auth(struct cfg80211_registered_device *rdev,
        if (!req.bss)
                return -ENOENT;
 
-       err = cfg80211_can_use_chan(rdev, wdev, req.bss->channel,
-                                   CHAN_MODE_SHARED);
-       if (err)
-               goto out;
-
        err = rdev_auth(rdev, dev, &req);
 
-out:
        cfg80211_put_bss(&rdev->wiphy, req.bss);
        return err;
 }
@@ -306,16 +300,10 @@ int cfg80211_mlme_assoc(struct cfg80211_registered_device *rdev,
        if (!req->bss)
                return -ENOENT;
 
-       err = cfg80211_can_use_chan(rdev, wdev, chan, CHAN_MODE_SHARED);
-       if (err)
-               goto out;
-
        err = rdev_assoc(rdev, dev, req);
        if (!err)
                cfg80211_hold_bss(bss_from_pub(req->bss));
-
-out:
-       if (err)
+       else
                cfg80211_put_bss(&rdev->wiphy, req->bss);
 
        return err;
@@ -414,7 +402,7 @@ int cfg80211_mlme_register_mgmt(struct wireless_dev *wdev, u32 snd_portid,
                                int match_len)
 {
        struct wiphy *wiphy = wdev->wiphy;
-       struct cfg80211_registered_device *rdev = wiphy_to_dev(wiphy);
+       struct cfg80211_registered_device *rdev = wiphy_to_rdev(wiphy);
        struct cfg80211_mgmt_registration *reg, *nreg;
        int err = 0;
        u16 mgmt_type;
@@ -473,7 +461,7 @@ int cfg80211_mlme_register_mgmt(struct wireless_dev *wdev, u32 snd_portid,
 void cfg80211_mlme_unregister_socket(struct wireless_dev *wdev, u32 nlportid)
 {
        struct wiphy *wiphy = wdev->wiphy;
-       struct cfg80211_registered_device *rdev = wiphy_to_dev(wiphy);
+       struct cfg80211_registered_device *rdev = wiphy_to_rdev(wiphy);
        struct cfg80211_mgmt_registration *reg, *tmp;
 
        spin_lock_bh(&wdev->mgmt_registrations_lock);
@@ -620,7 +608,7 @@ bool cfg80211_rx_mgmt(struct wireless_dev *wdev, int freq, int sig_mbm,
                      const u8 *buf, size_t len, u32 flags, gfp_t gfp)
 {
        struct wiphy *wiphy = wdev->wiphy;
-       struct cfg80211_registered_device *rdev = wiphy_to_dev(wiphy);
+       struct cfg80211_registered_device *rdev = wiphy_to_rdev(wiphy);
        struct cfg80211_mgmt_registration *reg;
        const struct ieee80211_txrx_stypes *stypes =
                &wiphy->mgmt_stypes[wdev->iftype];
@@ -739,7 +727,7 @@ void cfg80211_radar_event(struct wiphy *wiphy,
                          struct cfg80211_chan_def *chandef,
                          gfp_t gfp)
 {
-       struct cfg80211_registered_device *rdev = wiphy_to_dev(wiphy);
+       struct cfg80211_registered_device *rdev = wiphy_to_rdev(wiphy);
        unsigned long timeout;
 
        trace_cfg80211_radar_event(wiphy, chandef);
@@ -764,7 +752,7 @@ void cfg80211_cac_event(struct net_device *netdev,
 {
        struct wireless_dev *wdev = netdev->ieee80211_ptr;
        struct wiphy *wiphy = wdev->wiphy;
-       struct cfg80211_registered_device *rdev = wiphy_to_dev(wiphy);
+       struct cfg80211_registered_device *rdev = wiphy_to_rdev(wiphy);
        unsigned long timeout;
 
        trace_cfg80211_cac_event(netdev, event);
index 052c1bf8ffaceb92d3f117231a46fca78ed30216..0f1b18f209d6254800ac50f28b4d454017fe7e62 100644 (file)
@@ -168,8 +168,8 @@ __cfg80211_rdev_from_attrs(struct net *netns, struct nlattr **attrs)
                netdev = __dev_get_by_index(netns, ifindex);
                if (netdev) {
                        if (netdev->ieee80211_ptr)
-                               tmp = wiphy_to_dev(
-                                               netdev->ieee80211_ptr->wiphy);
+                               tmp = wiphy_to_rdev(
+                                       netdev->ieee80211_ptr->wiphy);
                        else
                                tmp = NULL;
 
@@ -385,6 +385,7 @@ static const struct nla_policy nl80211_policy[NL80211_ATTR_MAX+1] = {
        [NL80211_ATTR_MAC_HINT] = { .len = ETH_ALEN },
        [NL80211_ATTR_WIPHY_FREQ_HINT] = { .type = NLA_U32 },
        [NL80211_ATTR_TDLS_PEER_CAPABILITY] = { .type = NLA_U32 },
+       [NL80211_ATTR_IFACE_SOCKET_OWNER] = { .type = NLA_FLAG },
 };
 
 /* policy for the key attributes */
@@ -484,7 +485,7 @@ static int nl80211_prepare_wdev_dump(struct sk_buff *skb,
                        err = PTR_ERR(*wdev);
                        goto out_unlock;
                }
-               *rdev = wiphy_to_dev((*wdev)->wiphy);
+               *rdev = wiphy_to_rdev((*wdev)->wiphy);
                /* 0 is the first index - add 1 to parse only once */
                cb->args[0] = (*rdev)->wiphy_idx + 1;
                cb->args[1] = (*wdev)->identifier;
@@ -497,7 +498,7 @@ static int nl80211_prepare_wdev_dump(struct sk_buff *skb,
                        err = -ENODEV;
                        goto out_unlock;
                }
-               *rdev = wiphy_to_dev(wiphy);
+               *rdev = wiphy_to_rdev(wiphy);
                *wdev = NULL;
 
                list_for_each_entry(tmp, &(*rdev)->wdev_list, list) {
@@ -566,6 +567,13 @@ static int nl80211_msg_put_channel(struct sk_buff *msg,
                                   struct ieee80211_channel *chan,
                                   bool large)
 {
+       /* Some channels must be completely excluded from the
+        * list to protect old user-space tools from breaking
+        */
+       if (!large && chan->flags &
+           (IEEE80211_CHAN_NO_10MHZ | IEEE80211_CHAN_NO_20MHZ))
+               return 0;
+
        if (nla_put_u32(msg, NL80211_FREQUENCY_ATTR_FREQ,
                        chan->center_freq))
                goto nla_put_failure;
@@ -613,6 +621,18 @@ static int nl80211_msg_put_channel(struct sk_buff *msg,
                if ((chan->flags & IEEE80211_CHAN_NO_160MHZ) &&
                    nla_put_flag(msg, NL80211_FREQUENCY_ATTR_NO_160MHZ))
                        goto nla_put_failure;
+               if ((chan->flags & IEEE80211_CHAN_INDOOR_ONLY) &&
+                   nla_put_flag(msg, NL80211_FREQUENCY_ATTR_INDOOR_ONLY))
+                       goto nla_put_failure;
+               if ((chan->flags & IEEE80211_CHAN_GO_CONCURRENT) &&
+                   nla_put_flag(msg, NL80211_FREQUENCY_ATTR_GO_CONCURRENT))
+                       goto nla_put_failure;
+               if ((chan->flags & IEEE80211_CHAN_NO_20MHZ) &&
+                   nla_put_flag(msg, NL80211_FREQUENCY_ATTR_NO_20MHZ))
+                       goto nla_put_failure;
+               if ((chan->flags & IEEE80211_CHAN_NO_10MHZ) &&
+                   nla_put_flag(msg, NL80211_FREQUENCY_ATTR_NO_10MHZ))
+                       goto nla_put_failure;
        }
 
        if (nla_put_u32(msg, NL80211_FREQUENCY_ATTR_MAX_TX_POWER,
@@ -1006,42 +1026,42 @@ static int nl80211_send_wowlan_tcp_caps(struct cfg80211_registered_device *rdev,
 }
 
 static int nl80211_send_wowlan(struct sk_buff *msg,
-                              struct cfg80211_registered_device *dev,
+                              struct cfg80211_registered_device *rdev,
                               bool large)
 {
        struct nlattr *nl_wowlan;
 
-       if (!dev->wiphy.wowlan)
+       if (!rdev->wiphy.wowlan)
                return 0;
 
        nl_wowlan = nla_nest_start(msg, NL80211_ATTR_WOWLAN_TRIGGERS_SUPPORTED);
        if (!nl_wowlan)
                return -ENOBUFS;
 
-       if (((dev->wiphy.wowlan->flags & WIPHY_WOWLAN_ANY) &&
+       if (((rdev->wiphy.wowlan->flags & WIPHY_WOWLAN_ANY) &&
             nla_put_flag(msg, NL80211_WOWLAN_TRIG_ANY)) ||
-           ((dev->wiphy.wowlan->flags & WIPHY_WOWLAN_DISCONNECT) &&
+           ((rdev->wiphy.wowlan->flags & WIPHY_WOWLAN_DISCONNECT) &&
             nla_put_flag(msg, NL80211_WOWLAN_TRIG_DISCONNECT)) ||
-           ((dev->wiphy.wowlan->flags & WIPHY_WOWLAN_MAGIC_PKT) &&
+           ((rdev->wiphy.wowlan->flags & WIPHY_WOWLAN_MAGIC_PKT) &&
             nla_put_flag(msg, NL80211_WOWLAN_TRIG_MAGIC_PKT)) ||
-           ((dev->wiphy.wowlan->flags & WIPHY_WOWLAN_SUPPORTS_GTK_REKEY) &&
+           ((rdev->wiphy.wowlan->flags & WIPHY_WOWLAN_SUPPORTS_GTK_REKEY) &&
             nla_put_flag(msg, NL80211_WOWLAN_TRIG_GTK_REKEY_SUPPORTED)) ||
-           ((dev->wiphy.wowlan->flags & WIPHY_WOWLAN_GTK_REKEY_FAILURE) &&
+           ((rdev->wiphy.wowlan->flags & WIPHY_WOWLAN_GTK_REKEY_FAILURE) &&
             nla_put_flag(msg, NL80211_WOWLAN_TRIG_GTK_REKEY_FAILURE)) ||
-           ((dev->wiphy.wowlan->flags & WIPHY_WOWLAN_EAP_IDENTITY_REQ) &&
+           ((rdev->wiphy.wowlan->flags & WIPHY_WOWLAN_EAP_IDENTITY_REQ) &&
             nla_put_flag(msg, NL80211_WOWLAN_TRIG_EAP_IDENT_REQUEST)) ||
-           ((dev->wiphy.wowlan->flags & WIPHY_WOWLAN_4WAY_HANDSHAKE) &&
+           ((rdev->wiphy.wowlan->flags & WIPHY_WOWLAN_4WAY_HANDSHAKE) &&
             nla_put_flag(msg, NL80211_WOWLAN_TRIG_4WAY_HANDSHAKE)) ||
-           ((dev->wiphy.wowlan->flags & WIPHY_WOWLAN_RFKILL_RELEASE) &&
+           ((rdev->wiphy.wowlan->flags & WIPHY_WOWLAN_RFKILL_RELEASE) &&
             nla_put_flag(msg, NL80211_WOWLAN_TRIG_RFKILL_RELEASE)))
                return -ENOBUFS;
 
-       if (dev->wiphy.wowlan->n_patterns) {
+       if (rdev->wiphy.wowlan->n_patterns) {
                struct nl80211_pattern_support pat = {
-                       .max_patterns = dev->wiphy.wowlan->n_patterns,
-                       .min_pattern_len = dev->wiphy.wowlan->pattern_min_len,
-                       .max_pattern_len = dev->wiphy.wowlan->pattern_max_len,
-                       .max_pkt_offset = dev->wiphy.wowlan->max_pkt_offset,
+                       .max_patterns = rdev->wiphy.wowlan->n_patterns,
+                       .min_pattern_len = rdev->wiphy.wowlan->pattern_min_len,
+                       .max_pattern_len = rdev->wiphy.wowlan->pattern_max_len,
+                       .max_pkt_offset = rdev->wiphy.wowlan->max_pkt_offset,
                };
 
                if (nla_put(msg, NL80211_WOWLAN_TRIG_PKT_PATTERN,
@@ -1049,7 +1069,7 @@ static int nl80211_send_wowlan(struct sk_buff *msg,
                        return -ENOBUFS;
        }
 
-       if (large && nl80211_send_wowlan_tcp_caps(dev, msg))
+       if (large && nl80211_send_wowlan_tcp_caps(rdev, msg))
                return -ENOBUFS;
 
        nla_nest_end(msg, nl_wowlan);
@@ -1059,19 +1079,19 @@ static int nl80211_send_wowlan(struct sk_buff *msg,
 #endif
 
 static int nl80211_send_coalesce(struct sk_buff *msg,
-                                struct cfg80211_registered_device *dev)
+                                struct cfg80211_registered_device *rdev)
 {
        struct nl80211_coalesce_rule_support rule;
 
-       if (!dev->wiphy.coalesce)
+       if (!rdev->wiphy.coalesce)
                return 0;
 
-       rule.max_rules = dev->wiphy.coalesce->n_rules;
-       rule.max_delay = dev->wiphy.coalesce->max_delay;
-       rule.pat.max_patterns = dev->wiphy.coalesce->n_patterns;
-       rule.pat.min_pattern_len = dev->wiphy.coalesce->pattern_min_len;
-       rule.pat.max_pattern_len = dev->wiphy.coalesce->pattern_max_len;
-       rule.pat.max_pkt_offset = dev->wiphy.coalesce->max_pkt_offset;
+       rule.max_rules = rdev->wiphy.coalesce->n_rules;
+       rule.max_delay = rdev->wiphy.coalesce->max_delay;
+       rule.pat.max_patterns = rdev->wiphy.coalesce->n_patterns;
+       rule.pat.min_pattern_len = rdev->wiphy.coalesce->pattern_min_len;
+       rule.pat.max_pattern_len = rdev->wiphy.coalesce->pattern_max_len;
+       rule.pat.max_pkt_offset = rdev->wiphy.coalesce->max_pkt_offset;
 
        if (nla_put(msg, NL80211_ATTR_COALESCE_RULE, sizeof(rule), &rule))
                return -ENOBUFS;
@@ -1202,7 +1222,7 @@ struct nl80211_dump_wiphy_state {
        bool split;
 };
 
-static int nl80211_send_wiphy(struct cfg80211_registered_device *dev,
+static int nl80211_send_wiphy(struct cfg80211_registered_device *rdev,
                              struct sk_buff *msg, u32 portid, u32 seq,
                              int flags, struct nl80211_dump_wiphy_state *state)
 {
@@ -1214,7 +1234,7 @@ static int nl80211_send_wiphy(struct cfg80211_registered_device *dev,
        struct ieee80211_channel *chan;
        int i;
        const struct ieee80211_txrx_stypes *mgmt_stypes =
-                               dev->wiphy.mgmt_stypes;
+                               rdev->wiphy.mgmt_stypes;
        u32 features;
 
        hdr = nl80211hdr_put(msg, portid, seq, flags, NL80211_CMD_NEW_WIPHY);
@@ -1224,9 +1244,9 @@ static int nl80211_send_wiphy(struct cfg80211_registered_device *dev,
        if (WARN_ON(!state))
                return -EINVAL;
 
-       if (nla_put_u32(msg, NL80211_ATTR_WIPHY, dev->wiphy_idx) ||
+       if (nla_put_u32(msg, NL80211_ATTR_WIPHY, rdev->wiphy_idx) ||
            nla_put_string(msg, NL80211_ATTR_WIPHY_NAME,
-                          wiphy_name(&dev->wiphy)) ||
+                          wiphy_name(&rdev->wiphy)) ||
            nla_put_u32(msg, NL80211_ATTR_GENERATION,
                        cfg80211_rdev_list_generation))
                goto nla_put_failure;
@@ -1234,43 +1254,43 @@ static int nl80211_send_wiphy(struct cfg80211_registered_device *dev,
        switch (state->split_start) {
        case 0:
                if (nla_put_u8(msg, NL80211_ATTR_WIPHY_RETRY_SHORT,
-                              dev->wiphy.retry_short) ||
+                              rdev->wiphy.retry_short) ||
                    nla_put_u8(msg, NL80211_ATTR_WIPHY_RETRY_LONG,
-                              dev->wiphy.retry_long) ||
+                              rdev->wiphy.retry_long) ||
                    nla_put_u32(msg, NL80211_ATTR_WIPHY_FRAG_THRESHOLD,
-                               dev->wiphy.frag_threshold) ||
+                               rdev->wiphy.frag_threshold) ||
                    nla_put_u32(msg, NL80211_ATTR_WIPHY_RTS_THRESHOLD,
-                               dev->wiphy.rts_threshold) ||
+                               rdev->wiphy.rts_threshold) ||
                    nla_put_u8(msg, NL80211_ATTR_WIPHY_COVERAGE_CLASS,
-                              dev->wiphy.coverage_class) ||
+                              rdev->wiphy.coverage_class) ||
                    nla_put_u8(msg, NL80211_ATTR_MAX_NUM_SCAN_SSIDS,
-                              dev->wiphy.max_scan_ssids) ||
+                              rdev->wiphy.max_scan_ssids) ||
                    nla_put_u8(msg, NL80211_ATTR_MAX_NUM_SCHED_SCAN_SSIDS,
-                              dev->wiphy.max_sched_scan_ssids) ||
+                              rdev->wiphy.max_sched_scan_ssids) ||
                    nla_put_u16(msg, NL80211_ATTR_MAX_SCAN_IE_LEN,
-                               dev->wiphy.max_scan_ie_len) ||
+                               rdev->wiphy.max_scan_ie_len) ||
                    nla_put_u16(msg, NL80211_ATTR_MAX_SCHED_SCAN_IE_LEN,
-                               dev->wiphy.max_sched_scan_ie_len) ||
+                               rdev->wiphy.max_sched_scan_ie_len) ||
                    nla_put_u8(msg, NL80211_ATTR_MAX_MATCH_SETS,
-                              dev->wiphy.max_match_sets))
+                              rdev->wiphy.max_match_sets))
                        goto nla_put_failure;
 
-               if ((dev->wiphy.flags & WIPHY_FLAG_IBSS_RSN) &&
+               if ((rdev->wiphy.flags & WIPHY_FLAG_IBSS_RSN) &&
                    nla_put_flag(msg, NL80211_ATTR_SUPPORT_IBSS_RSN))
                        goto nla_put_failure;
-               if ((dev->wiphy.flags & WIPHY_FLAG_MESH_AUTH) &&
+               if ((rdev->wiphy.flags & WIPHY_FLAG_MESH_AUTH) &&
                    nla_put_flag(msg, NL80211_ATTR_SUPPORT_MESH_AUTH))
                        goto nla_put_failure;
-               if ((dev->wiphy.flags & WIPHY_FLAG_AP_UAPSD) &&
+               if ((rdev->wiphy.flags & WIPHY_FLAG_AP_UAPSD) &&
                    nla_put_flag(msg, NL80211_ATTR_SUPPORT_AP_UAPSD))
                        goto nla_put_failure;
-               if ((dev->wiphy.flags & WIPHY_FLAG_SUPPORTS_FW_ROAM) &&
+               if ((rdev->wiphy.flags & WIPHY_FLAG_SUPPORTS_FW_ROAM) &&
                    nla_put_flag(msg, NL80211_ATTR_ROAM_SUPPORT))
                        goto nla_put_failure;
-               if ((dev->wiphy.flags & WIPHY_FLAG_SUPPORTS_TDLS) &&
+               if ((rdev->wiphy.flags & WIPHY_FLAG_SUPPORTS_TDLS) &&
                    nla_put_flag(msg, NL80211_ATTR_TDLS_SUPPORT))
                        goto nla_put_failure;
-               if ((dev->wiphy.flags & WIPHY_FLAG_TDLS_EXTERNAL_SETUP) &&
+               if ((rdev->wiphy.flags & WIPHY_FLAG_TDLS_EXTERNAL_SETUP) &&
                    nla_put_flag(msg, NL80211_ATTR_TDLS_EXTERNAL_SETUP))
                        goto nla_put_failure;
                state->split_start++;
@@ -1278,35 +1298,35 @@ static int nl80211_send_wiphy(struct cfg80211_registered_device *dev,
                        break;
        case 1:
                if (nla_put(msg, NL80211_ATTR_CIPHER_SUITES,
-                           sizeof(u32) * dev->wiphy.n_cipher_suites,
-                           dev->wiphy.cipher_suites))
+                           sizeof(u32) * rdev->wiphy.n_cipher_suites,
+                           rdev->wiphy.cipher_suites))
                        goto nla_put_failure;
 
                if (nla_put_u8(msg, NL80211_ATTR_MAX_NUM_PMKIDS,
-                              dev->wiphy.max_num_pmkids))
+                              rdev->wiphy.max_num_pmkids))
                        goto nla_put_failure;
 
-               if ((dev->wiphy.flags & WIPHY_FLAG_CONTROL_PORT_PROTOCOL) &&
+               if ((rdev->wiphy.flags & WIPHY_FLAG_CONTROL_PORT_PROTOCOL) &&
                    nla_put_flag(msg, NL80211_ATTR_CONTROL_PORT_ETHERTYPE))
                        goto nla_put_failure;
 
                if (nla_put_u32(msg, NL80211_ATTR_WIPHY_ANTENNA_AVAIL_TX,
-                               dev->wiphy.available_antennas_tx) ||
+                               rdev->wiphy.available_antennas_tx) ||
                    nla_put_u32(msg, NL80211_ATTR_WIPHY_ANTENNA_AVAIL_RX,
-                               dev->wiphy.available_antennas_rx))
+                               rdev->wiphy.available_antennas_rx))
                        goto nla_put_failure;
 
-               if ((dev->wiphy.flags & WIPHY_FLAG_AP_PROBE_RESP_OFFLOAD) &&
+               if ((rdev->wiphy.flags & WIPHY_FLAG_AP_PROBE_RESP_OFFLOAD) &&
                    nla_put_u32(msg, NL80211_ATTR_PROBE_RESP_OFFLOAD,
-                               dev->wiphy.probe_resp_offload))
+                               rdev->wiphy.probe_resp_offload))
                        goto nla_put_failure;
 
-               if ((dev->wiphy.available_antennas_tx ||
-                    dev->wiphy.available_antennas_rx) &&
-                   dev->ops->get_antenna) {
+               if ((rdev->wiphy.available_antennas_tx ||
+                    rdev->wiphy.available_antennas_rx) &&
+                   rdev->ops->get_antenna) {
                        u32 tx_ant = 0, rx_ant = 0;
                        int res;
-                       res = rdev_get_antenna(dev, &tx_ant, &rx_ant);
+                       res = rdev_get_antenna(rdev, &tx_ant, &rx_ant);
                        if (!res) {
                                if (nla_put_u32(msg,
                                                NL80211_ATTR_WIPHY_ANTENNA_TX,
@@ -1323,7 +1343,7 @@ static int nl80211_send_wiphy(struct cfg80211_registered_device *dev,
                        break;
        case 2:
                if (nl80211_put_iftypes(msg, NL80211_ATTR_SUPPORTED_IFTYPES,
-                                       dev->wiphy.interface_modes))
+                                       rdev->wiphy.interface_modes))
                                goto nla_put_failure;
                state->split_start++;
                if (state->split)
@@ -1337,7 +1357,7 @@ static int nl80211_send_wiphy(struct cfg80211_registered_device *dev,
                     band < IEEE80211_NUM_BANDS; band++) {
                        struct ieee80211_supported_band *sband;
 
-                       sband = dev->wiphy.bands[band];
+                       sband = rdev->wiphy.bands[band];
 
                        if (!sband)
                                continue;
@@ -1414,7 +1434,7 @@ static int nl80211_send_wiphy(struct cfg80211_registered_device *dev,
                i = 0;
 #define CMD(op, n)                                                     \
                 do {                                                   \
-                       if (dev->ops->op) {                             \
+                       if (rdev->ops->op) {                            \
                                i++;                                    \
                                if (nla_put_u32(msg, i, NL80211_CMD_ ## n)) \
                                        goto nla_put_failure;           \
@@ -1438,32 +1458,32 @@ static int nl80211_send_wiphy(struct cfg80211_registered_device *dev,
                CMD(set_pmksa, SET_PMKSA);
                CMD(del_pmksa, DEL_PMKSA);
                CMD(flush_pmksa, FLUSH_PMKSA);
-               if (dev->wiphy.flags & WIPHY_FLAG_HAS_REMAIN_ON_CHANNEL)
+               if (rdev->wiphy.flags & WIPHY_FLAG_HAS_REMAIN_ON_CHANNEL)
                        CMD(remain_on_channel, REMAIN_ON_CHANNEL);
                CMD(set_bitrate_mask, SET_TX_BITRATE_MASK);
                CMD(mgmt_tx, FRAME);
                CMD(mgmt_tx_cancel_wait, FRAME_WAIT_CANCEL);
-               if (dev->wiphy.flags & WIPHY_FLAG_NETNS_OK) {
+               if (rdev->wiphy.flags & WIPHY_FLAG_NETNS_OK) {
                        i++;
                        if (nla_put_u32(msg, i, NL80211_CMD_SET_WIPHY_NETNS))
                                goto nla_put_failure;
                }
-               if (dev->ops->set_monitor_channel || dev->ops->start_ap ||
-                   dev->ops->join_mesh) {
+               if (rdev->ops->set_monitor_channel || rdev->ops->start_ap ||
+                   rdev->ops->join_mesh) {
                        i++;
                        if (nla_put_u32(msg, i, NL80211_CMD_SET_CHANNEL))
                                goto nla_put_failure;
                }
                CMD(set_wds_peer, SET_WDS_PEER);
-               if (dev->wiphy.flags & WIPHY_FLAG_SUPPORTS_TDLS) {
+               if (rdev->wiphy.flags & WIPHY_FLAG_SUPPORTS_TDLS) {
                        CMD(tdls_mgmt, TDLS_MGMT);
                        CMD(tdls_oper, TDLS_OPER);
                }
-               if (dev->wiphy.flags & WIPHY_FLAG_SUPPORTS_SCHED_SCAN)
+               if (rdev->wiphy.flags & WIPHY_FLAG_SUPPORTS_SCHED_SCAN)
                        CMD(sched_scan_start, START_SCHED_SCAN);
                CMD(probe_client, PROBE_CLIENT);
                CMD(set_noack_map, SET_NOACK_MAP);
-               if (dev->wiphy.flags & WIPHY_FLAG_REPORTS_OBSS) {
+               if (rdev->wiphy.flags & WIPHY_FLAG_REPORTS_OBSS) {
                        i++;
                        if (nla_put_u32(msg, i, NL80211_CMD_REGISTER_BEACONS))
                                goto nla_put_failure;
@@ -1473,7 +1493,7 @@ static int nl80211_send_wiphy(struct cfg80211_registered_device *dev,
                if (state->split) {
                        CMD(crit_proto_start, CRIT_PROTOCOL_START);
                        CMD(crit_proto_stop, CRIT_PROTOCOL_STOP);
-                       if (dev->wiphy.flags & WIPHY_FLAG_HAS_CHANNEL_SWITCH)
+                       if (rdev->wiphy.flags & WIPHY_FLAG_HAS_CHANNEL_SWITCH)
                                CMD(channel_switch, CHANNEL_SWITCH);
                }
                CMD(set_qos_map, SET_QOS_MAP);
@@ -1484,13 +1504,13 @@ static int nl80211_send_wiphy(struct cfg80211_registered_device *dev,
 
 #undef CMD
 
-               if (dev->ops->connect || dev->ops->auth) {
+               if (rdev->ops->connect || rdev->ops->auth) {
                        i++;
                        if (nla_put_u32(msg, i, NL80211_CMD_CONNECT))
                                goto nla_put_failure;
                }
 
-               if (dev->ops->disconnect || dev->ops->deauth) {
+               if (rdev->ops->disconnect || rdev->ops->deauth) {
                        i++;
                        if (nla_put_u32(msg, i, NL80211_CMD_DISCONNECT))
                                goto nla_put_failure;
@@ -1501,14 +1521,14 @@ static int nl80211_send_wiphy(struct cfg80211_registered_device *dev,
                if (state->split)
                        break;
        case 5:
-               if (dev->ops->remain_on_channel &&
-                   (dev->wiphy.flags & WIPHY_FLAG_HAS_REMAIN_ON_CHANNEL) &&
+               if (rdev->ops->remain_on_channel &&
+                   (rdev->wiphy.flags & WIPHY_FLAG_HAS_REMAIN_ON_CHANNEL) &&
                    nla_put_u32(msg,
                                NL80211_ATTR_MAX_REMAIN_ON_CHANNEL_DURATION,
-                               dev->wiphy.max_remain_on_channel_duration))
+                               rdev->wiphy.max_remain_on_channel_duration))
                        goto nla_put_failure;
 
-               if ((dev->wiphy.flags & WIPHY_FLAG_OFFCHAN_TX) &&
+               if ((rdev->wiphy.flags & WIPHY_FLAG_OFFCHAN_TX) &&
                    nla_put_flag(msg, NL80211_ATTR_OFFCHANNEL_TX_OK))
                        goto nla_put_failure;
 
@@ -1519,7 +1539,7 @@ static int nl80211_send_wiphy(struct cfg80211_registered_device *dev,
                        break;
        case 6:
 #ifdef CONFIG_PM
-               if (nl80211_send_wowlan(msg, dev, state->split))
+               if (nl80211_send_wowlan(msg, rdev, state->split))
                        goto nla_put_failure;
                state->split_start++;
                if (state->split)
@@ -1529,10 +1549,10 @@ static int nl80211_send_wiphy(struct cfg80211_registered_device *dev,
 #endif
        case 7:
                if (nl80211_put_iftypes(msg, NL80211_ATTR_SOFTWARE_IFTYPES,
-                                       dev->wiphy.software_iftypes))
+                                       rdev->wiphy.software_iftypes))
                        goto nla_put_failure;
 
-               if (nl80211_put_iface_combinations(&dev->wiphy, msg,
+               if (nl80211_put_iface_combinations(&rdev->wiphy, msg,
                                                   state->split))
                        goto nla_put_failure;
 
@@ -1540,12 +1560,12 @@ static int nl80211_send_wiphy(struct cfg80211_registered_device *dev,
                if (state->split)
                        break;
        case 8:
-               if ((dev->wiphy.flags & WIPHY_FLAG_HAVE_AP_SME) &&
+               if ((rdev->wiphy.flags & WIPHY_FLAG_HAVE_AP_SME) &&
                    nla_put_u32(msg, NL80211_ATTR_DEVICE_AP_SME,
-                               dev->wiphy.ap_sme_capa))
+                               rdev->wiphy.ap_sme_capa))
                        goto nla_put_failure;
 
-               features = dev->wiphy.features;
+               features = rdev->wiphy.features;
                /*
                 * We can only add the per-channel limit information if the
                 * dump is split, otherwise it makes it too big. Therefore
@@ -1556,16 +1576,16 @@ static int nl80211_send_wiphy(struct cfg80211_registered_device *dev,
                if (nla_put_u32(msg, NL80211_ATTR_FEATURE_FLAGS, features))
                        goto nla_put_failure;
 
-               if (dev->wiphy.ht_capa_mod_mask &&
+               if (rdev->wiphy.ht_capa_mod_mask &&
                    nla_put(msg, NL80211_ATTR_HT_CAPABILITY_MASK,
-                           sizeof(*dev->wiphy.ht_capa_mod_mask),
-                           dev->wiphy.ht_capa_mod_mask))
+                           sizeof(*rdev->wiphy.ht_capa_mod_mask),
+                           rdev->wiphy.ht_capa_mod_mask))
                        goto nla_put_failure;
 
-               if (dev->wiphy.flags & WIPHY_FLAG_HAVE_AP_SME &&
-                   dev->wiphy.max_acl_mac_addrs &&
+               if (rdev->wiphy.flags & WIPHY_FLAG_HAVE_AP_SME &&
+                   rdev->wiphy.max_acl_mac_addrs &&
                    nla_put_u32(msg, NL80211_ATTR_MAC_ACL_MAX,
-                               dev->wiphy.max_acl_mac_addrs))
+                               rdev->wiphy.max_acl_mac_addrs))
                        goto nla_put_failure;
 
                /*
@@ -1581,41 +1601,41 @@ static int nl80211_send_wiphy(struct cfg80211_registered_device *dev,
                state->split_start++;
                break;
        case 9:
-               if (dev->wiphy.extended_capabilities &&
+               if (rdev->wiphy.extended_capabilities &&
                    (nla_put(msg, NL80211_ATTR_EXT_CAPA,
-                            dev->wiphy.extended_capabilities_len,
-                            dev->wiphy.extended_capabilities) ||
+                            rdev->wiphy.extended_capabilities_len,
+                            rdev->wiphy.extended_capabilities) ||
                     nla_put(msg, NL80211_ATTR_EXT_CAPA_MASK,
-                            dev->wiphy.extended_capabilities_len,
-                            dev->wiphy.extended_capabilities_mask)))
+                            rdev->wiphy.extended_capabilities_len,
+                            rdev->wiphy.extended_capabilities_mask)))
                        goto nla_put_failure;
 
-               if (dev->wiphy.vht_capa_mod_mask &&
+               if (rdev->wiphy.vht_capa_mod_mask &&
                    nla_put(msg, NL80211_ATTR_VHT_CAPABILITY_MASK,
-                           sizeof(*dev->wiphy.vht_capa_mod_mask),
-                           dev->wiphy.vht_capa_mod_mask))
+                           sizeof(*rdev->wiphy.vht_capa_mod_mask),
+                           rdev->wiphy.vht_capa_mod_mask))
                        goto nla_put_failure;
 
                state->split_start++;
                break;
        case 10:
-               if (nl80211_send_coalesce(msg, dev))
+               if (nl80211_send_coalesce(msg, rdev))
                        goto nla_put_failure;
 
-               if ((dev->wiphy.flags & WIPHY_FLAG_SUPPORTS_5_10_MHZ) &&
+               if ((rdev->wiphy.flags & WIPHY_FLAG_SUPPORTS_5_10_MHZ) &&
                    (nla_put_flag(msg, NL80211_ATTR_SUPPORT_5_MHZ) ||
                     nla_put_flag(msg, NL80211_ATTR_SUPPORT_10_MHZ)))
                        goto nla_put_failure;
 
-               if (dev->wiphy.max_ap_assoc_sta &&
+               if (rdev->wiphy.max_ap_assoc_sta &&
                    nla_put_u32(msg, NL80211_ATTR_MAX_AP_ASSOC_STA,
-                               dev->wiphy.max_ap_assoc_sta))
+                               rdev->wiphy.max_ap_assoc_sta))
                        goto nla_put_failure;
 
                state->split_start++;
                break;
        case 11:
-               if (dev->wiphy.n_vendor_commands) {
+               if (rdev->wiphy.n_vendor_commands) {
                        const struct nl80211_vendor_cmd_info *info;
                        struct nlattr *nested;
 
@@ -1623,15 +1643,15 @@ static int nl80211_send_wiphy(struct cfg80211_registered_device *dev,
                        if (!nested)
                                goto nla_put_failure;
 
-                       for (i = 0; i < dev->wiphy.n_vendor_commands; i++) {
-                               info = &dev->wiphy.vendor_commands[i].info;
+                       for (i = 0; i < rdev->wiphy.n_vendor_commands; i++) {
+                               info = &rdev->wiphy.vendor_commands[i].info;
                                if (nla_put(msg, i + 1, sizeof(*info), info))
                                        goto nla_put_failure;
                        }
                        nla_nest_end(msg, nested);
                }
 
-               if (dev->wiphy.n_vendor_events) {
+               if (rdev->wiphy.n_vendor_events) {
                        const struct nl80211_vendor_cmd_info *info;
                        struct nlattr *nested;
 
@@ -1640,8 +1660,8 @@ static int nl80211_send_wiphy(struct cfg80211_registered_device *dev,
                        if (!nested)
                                goto nla_put_failure;
 
-                       for (i = 0; i < dev->wiphy.n_vendor_events; i++) {
-                               info = &dev->wiphy.vendor_events[i];
+                       for (i = 0; i < rdev->wiphy.n_vendor_events; i++) {
+                               info = &rdev->wiphy.vendor_events[i];
                                if (nla_put(msg, i + 1, sizeof(*info), info))
                                        goto nla_put_failure;
                        }
@@ -1684,7 +1704,7 @@ static int nl80211_dump_wiphy_parse(struct sk_buff *skb,
                if (!netdev)
                        return -ENODEV;
                if (netdev->ieee80211_ptr) {
-                       rdev = wiphy_to_dev(
+                       rdev = wiphy_to_rdev(
                                netdev->ieee80211_ptr->wiphy);
                        state->filter_wiphy = rdev->wiphy_idx;
                }
@@ -1697,7 +1717,7 @@ static int nl80211_dump_wiphy(struct sk_buff *skb, struct netlink_callback *cb)
 {
        int idx = 0, ret;
        struct nl80211_dump_wiphy_state *state = (void *)cb->args[0];
-       struct cfg80211_registered_device *dev;
+       struct cfg80211_registered_device *rdev;
 
        rtnl_lock();
        if (!state) {
@@ -1716,17 +1736,17 @@ static int nl80211_dump_wiphy(struct sk_buff *skb, struct netlink_callback *cb)
                cb->args[0] = (long)state;
        }
 
-       list_for_each_entry(dev, &cfg80211_rdev_list, list) {
-               if (!net_eq(wiphy_net(&dev->wiphy), sock_net(skb->sk)))
+       list_for_each_entry(rdev, &cfg80211_rdev_list, list) {
+               if (!net_eq(wiphy_net(&rdev->wiphy), sock_net(skb->sk)))
                        continue;
                if (++idx <= state->start)
                        continue;
                if (state->filter_wiphy != -1 &&
-                   state->filter_wiphy != dev->wiphy_idx)
+                   state->filter_wiphy != rdev->wiphy_idx)
                        continue;
                /* attempt to fit multiple wiphy data chunks into the skb */
                do {
-                       ret = nl80211_send_wiphy(dev, skb,
+                       ret = nl80211_send_wiphy(rdev, skb,
                                                 NETLINK_CB(cb->skb).portid,
                                                 cb->nlh->nlmsg_seq,
                                                 NLM_F_MULTI, state);
@@ -1774,14 +1794,14 @@ static int nl80211_dump_wiphy_done(struct netlink_callback *cb)
 static int nl80211_get_wiphy(struct sk_buff *skb, struct genl_info *info)
 {
        struct sk_buff *msg;
-       struct cfg80211_registered_device *dev = info->user_ptr[0];
+       struct cfg80211_registered_device *rdev = info->user_ptr[0];
        struct nl80211_dump_wiphy_state state = {};
 
        msg = nlmsg_new(4096, GFP_KERNEL);
        if (!msg)
                return -ENOMEM;
 
-       if (nl80211_send_wiphy(dev, msg, info->snd_portid, info->snd_seq, 0,
+       if (nl80211_send_wiphy(rdev, msg, info->snd_portid, info->snd_seq, 0,
                               &state) < 0) {
                nlmsg_free(msg);
                return -ENOBUFS;
@@ -1908,18 +1928,20 @@ static int nl80211_parse_chandef(struct cfg80211_registered_device *rdev,
 }
 
 static int __nl80211_set_channel(struct cfg80211_registered_device *rdev,
-                                struct wireless_dev *wdev,
+                                struct net_device *dev,
                                 struct genl_info *info)
 {
        struct cfg80211_chan_def chandef;
        int result;
        enum nl80211_iftype iftype = NL80211_IFTYPE_MONITOR;
+       struct wireless_dev *wdev = NULL;
 
-       if (wdev)
-               iftype = wdev->iftype;
-
+       if (dev)
+               wdev = dev->ieee80211_ptr;
        if (!nl80211_can_set_dev_channel(wdev))
                return -EOPNOTSUPP;
+       if (wdev)
+               iftype = wdev->iftype;
 
        result = nl80211_parse_chandef(rdev, info, &chandef);
        if (result)
@@ -1928,14 +1950,27 @@ static int __nl80211_set_channel(struct cfg80211_registered_device *rdev,
        switch (iftype) {
        case NL80211_IFTYPE_AP:
        case NL80211_IFTYPE_P2P_GO:
-               if (wdev->beacon_interval) {
-                       result = -EBUSY;
-                       break;
-               }
-               if (!cfg80211_reg_can_beacon(&rdev->wiphy, &chandef)) {
+               if (!cfg80211_reg_can_beacon(&rdev->wiphy, &chandef, iftype)) {
                        result = -EINVAL;
                        break;
                }
+               if (wdev->beacon_interval) {
+                       if (!dev || !rdev->ops->set_ap_chanwidth ||
+                           !(rdev->wiphy.features &
+                             NL80211_FEATURE_AP_MODE_CHAN_WIDTH_CHANGE)) {
+                               result = -EBUSY;
+                               break;
+                       }
+
+                       /* Only allow dynamic channel width changes */
+                       if (chandef.chan != wdev->preset_chandef.chan) {
+                               result = -EBUSY;
+                               break;
+                       }
+                       result = rdev_set_ap_chanwidth(rdev, dev, &chandef);
+                       if (result)
+                               break;
+               }
                wdev->preset_chandef = chandef;
                result = 0;
                break;
@@ -1957,7 +1992,7 @@ static int nl80211_set_channel(struct sk_buff *skb, struct genl_info *info)
        struct cfg80211_registered_device *rdev = info->user_ptr[0];
        struct net_device *netdev = info->user_ptr[1];
 
-       return __nl80211_set_channel(rdev, netdev->ieee80211_ptr, info);
+       return __nl80211_set_channel(rdev, netdev, info);
 }
 
 static int nl80211_set_wds_peer(struct sk_buff *skb, struct genl_info *info)
@@ -2013,7 +2048,7 @@ static int nl80211_set_wiphy(struct sk_buff *skb, struct genl_info *info)
 
                netdev = __dev_get_by_index(genl_info_net(info), ifindex);
                if (netdev && netdev->ieee80211_ptr)
-                       rdev = wiphy_to_dev(netdev->ieee80211_ptr->wiphy);
+                       rdev = wiphy_to_rdev(netdev->ieee80211_ptr->wiphy);
                else
                        netdev = NULL;
        }
@@ -2079,9 +2114,10 @@ static int nl80211_set_wiphy(struct sk_buff *skb, struct genl_info *info)
        }
 
        if (info->attrs[NL80211_ATTR_WIPHY_FREQ]) {
-               result = __nl80211_set_channel(rdev,
-                               nl80211_can_set_dev_channel(wdev) ? wdev : NULL,
-                               info);
+               result = __nl80211_set_channel(
+                       rdev,
+                       nl80211_can_set_dev_channel(wdev) ? netdev : NULL,
+                       info);
                if (result)
                        return result;
        }
@@ -2229,7 +2265,7 @@ static int nl80211_set_wiphy(struct sk_buff *skb, struct genl_info *info)
 static inline u64 wdev_id(struct wireless_dev *wdev)
 {
        return (u64)wdev->identifier |
-              ((u64)wiphy_to_dev(wdev->wiphy)->wiphy_idx << 32);
+              ((u64)wiphy_to_rdev(wdev->wiphy)->wiphy_idx << 32);
 }
 
 static int nl80211_send_chandef(struct sk_buff *msg,
@@ -2355,7 +2391,7 @@ static int nl80211_dump_interface(struct sk_buff *skb, struct netlink_callback *
 static int nl80211_get_interface(struct sk_buff *skb, struct genl_info *info)
 {
        struct sk_buff *msg;
-       struct cfg80211_registered_device *dev = info->user_ptr[0];
+       struct cfg80211_registered_device *rdev = info->user_ptr[0];
        struct wireless_dev *wdev = info->user_ptr[1];
 
        msg = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL);
@@ -2363,7 +2399,7 @@ static int nl80211_get_interface(struct sk_buff *skb, struct genl_info *info)
                return -ENOMEM;
 
        if (nl80211_send_iface(msg, info->snd_portid, info->snd_seq, 0,
-                              dev, wdev) < 0) {
+                              rdev, wdev) < 0) {
                nlmsg_free(msg);
                return -ENOBUFS;
        }
@@ -2514,6 +2550,9 @@ static int nl80211_new_interface(struct sk_buff *skb, struct genl_info *info)
        enum nl80211_iftype type = NL80211_IFTYPE_UNSPECIFIED;
        u32 flags;
 
+       /* to avoid failing a new interface creation due to pending removal */
+       cfg80211_destroy_ifaces(rdev);
+
        memset(&params, 0, sizeof(params));
 
        if (!info->attrs[NL80211_ATTR_IFNAME])
@@ -2563,6 +2602,9 @@ static int nl80211_new_interface(struct sk_buff *skb, struct genl_info *info)
                return PTR_ERR(wdev);
        }
 
+       if (info->attrs[NL80211_ATTR_IFACE_SOCKET_OWNER])
+               wdev->owner_nlportid = info->snd_portid;
+
        switch (type) {
        case NL80211_IFTYPE_MESH_POINT:
                if (!info->attrs[NL80211_ATTR_MESH_ID])
@@ -3142,7 +3184,6 @@ static int nl80211_start_ap(struct sk_buff *skb, struct genl_info *info)
        struct wireless_dev *wdev = dev->ieee80211_ptr;
        struct cfg80211_ap_settings params;
        int err;
-       u8 radar_detect_width = 0;
 
        if (dev->ieee80211_ptr->iftype != NL80211_IFTYPE_AP &&
            dev->ieee80211_ptr->iftype != NL80211_IFTYPE_P2P_GO)
@@ -3258,24 +3299,10 @@ static int nl80211_start_ap(struct sk_buff *skb, struct genl_info *info)
        } else if (!nl80211_get_ap_channel(rdev, &params))
                return -EINVAL;
 
-       if (!cfg80211_reg_can_beacon(&rdev->wiphy, &params.chandef))
+       if (!cfg80211_reg_can_beacon(&rdev->wiphy, &params.chandef,
+                                    wdev->iftype))
                return -EINVAL;
 
-       err = cfg80211_chandef_dfs_required(wdev->wiphy, &params.chandef);
-       if (err < 0)
-               return err;
-       if (err) {
-               radar_detect_width = BIT(params.chandef.width);
-               params.radar_required = true;
-       }
-
-       err = cfg80211_can_use_iftype_chan(rdev, wdev, wdev->iftype,
-                                          params.chandef.chan,
-                                          CHAN_MODE_SHARED,
-                                          radar_detect_width);
-       if (err)
-               return err;
-
        if (info->attrs[NL80211_ATTR_ACL_POLICY]) {
                params.acl = parse_acl_data(&rdev->wiphy, info);
                if (IS_ERR(params.acl))
@@ -3675,13 +3702,13 @@ static int nl80211_dump_station(struct sk_buff *skb,
                                struct netlink_callback *cb)
 {
        struct station_info sinfo;
-       struct cfg80211_registered_device *dev;
+       struct cfg80211_registered_device *rdev;
        struct wireless_dev *wdev;
        u8 mac_addr[ETH_ALEN];
        int sta_idx = cb->args[2];
        int err;
 
-       err = nl80211_prepare_wdev_dump(skb, cb, &dev, &wdev);
+       err = nl80211_prepare_wdev_dump(skb, cb, &rdev, &wdev);
        if (err)
                return err;
 
@@ -3690,14 +3717,14 @@ static int nl80211_dump_station(struct sk_buff *skb,
                goto out_err;
        }
 
-       if (!dev->ops->dump_station) {
+       if (!rdev->ops->dump_station) {
                err = -EOPNOTSUPP;
                goto out_err;
        }
 
        while (1) {
                memset(&sinfo, 0, sizeof(sinfo));
-               err = rdev_dump_station(dev, wdev->netdev, sta_idx,
+               err = rdev_dump_station(rdev, wdev->netdev, sta_idx,
                                        mac_addr, &sinfo);
                if (err == -ENOENT)
                        break;
@@ -3707,7 +3734,7 @@ static int nl80211_dump_station(struct sk_buff *skb,
                if (nl80211_send_station(skb,
                                NETLINK_CB(cb->skb).portid,
                                cb->nlh->nlmsg_seq, NLM_F_MULTI,
-                               dev, wdev->netdev, mac_addr,
+                               rdev, wdev->netdev, mac_addr,
                                &sinfo) < 0)
                        goto out;
 
@@ -3719,7 +3746,7 @@ static int nl80211_dump_station(struct sk_buff *skb,
        cb->args[2] = sta_idx;
        err = skb->len;
  out_err:
-       nl80211_finish_wdev_dump(dev);
+       nl80211_finish_wdev_dump(rdev);
 
        return err;
 }
@@ -4380,18 +4407,18 @@ static int nl80211_dump_mpath(struct sk_buff *skb,
                              struct netlink_callback *cb)
 {
        struct mpath_info pinfo;
-       struct cfg80211_registered_device *dev;
+       struct cfg80211_registered_device *rdev;
        struct wireless_dev *wdev;
        u8 dst[ETH_ALEN];
        u8 next_hop[ETH_ALEN];
        int path_idx = cb->args[2];
        int err;
 
-       err = nl80211_prepare_wdev_dump(skb, cb, &dev, &wdev);
+       err = nl80211_prepare_wdev_dump(skb, cb, &rdev, &wdev);
        if (err)
                return err;
 
-       if (!dev->ops->dump_mpath) {
+       if (!rdev->ops->dump_mpath) {
                err = -EOPNOTSUPP;
                goto out_err;
        }
@@ -4402,7 +4429,7 @@ static int nl80211_dump_mpath(struct sk_buff *skb,
        }
 
        while (1) {
-               err = rdev_dump_mpath(dev, wdev->netdev, path_idx, dst,
+               err = rdev_dump_mpath(rdev, wdev->netdev, path_idx, dst,
                                      next_hop, &pinfo);
                if (err == -ENOENT)
                        break;
@@ -4423,7 +4450,7 @@ static int nl80211_dump_mpath(struct sk_buff *skb,
        cb->args[2] = path_idx;
        err = skb->len;
  out_err:
-       nl80211_finish_wdev_dump(dev);
+       nl80211_finish_wdev_dump(rdev);
        return err;
 }
 
@@ -4663,7 +4690,6 @@ static int parse_reg_rule(struct nlattr *tb[],
 
 static int nl80211_req_set_reg(struct sk_buff *skb, struct genl_info *info)
 {
-       int r;
        char *data = NULL;
        enum nl80211_user_reg_hint_type user_reg_hint_type;
 
@@ -4676,11 +4702,6 @@ static int nl80211_req_set_reg(struct sk_buff *skb, struct genl_info *info)
        if (unlikely(!rcu_access_pointer(cfg80211_regdomain)))
                return -EINPROGRESS;
 
-       if (!info->attrs[NL80211_ATTR_REG_ALPHA2])
-               return -EINVAL;
-
-       data = nla_data(info->attrs[NL80211_ATTR_REG_ALPHA2]);
-
        if (info->attrs[NL80211_ATTR_USER_REG_HINT_TYPE])
                user_reg_hint_type =
                  nla_get_u32(info->attrs[NL80211_ATTR_USER_REG_HINT_TYPE]);
@@ -4690,14 +4711,16 @@ static int nl80211_req_set_reg(struct sk_buff *skb, struct genl_info *info)
        switch (user_reg_hint_type) {
        case NL80211_USER_REG_HINT_USER:
        case NL80211_USER_REG_HINT_CELL_BASE:
-               break;
+               if (!info->attrs[NL80211_ATTR_REG_ALPHA2])
+                       return -EINVAL;
+
+               data = nla_data(info->attrs[NL80211_ATTR_REG_ALPHA2]);
+               return regulatory_hint_user(data, user_reg_hint_type);
+       case NL80211_USER_REG_HINT_INDOOR:
+               return regulatory_hint_indoor_user();
        default:
                return -EINVAL;
        }
-
-       r = regulatory_hint_user(data, user_reg_hint_type);
-
-       return r;
 }
 
 static int nl80211_get_mesh_config(struct sk_buff *skb,
@@ -5796,7 +5819,8 @@ static int nl80211_start_radar_detection(struct sk_buff *skb,
        if (wdev->cac_started)
                return -EBUSY;
 
-       err = cfg80211_chandef_dfs_required(wdev->wiphy, &chandef);
+       err = cfg80211_chandef_dfs_required(wdev->wiphy, &chandef,
+                                           NL80211_IFTYPE_UNSPECIFIED);
        if (err < 0)
                return err;
 
@@ -5809,12 +5833,6 @@ static int nl80211_start_radar_detection(struct sk_buff *skb,
        if (!rdev->ops->start_radar_detection)
                return -EOPNOTSUPP;
 
-       err = cfg80211_can_use_iftype_chan(rdev, wdev, wdev->iftype,
-                                          chandef.chan, CHAN_MODE_SHARED,
-                                          BIT(chandef.width));
-       if (err)
-               return err;
-
        cac_time_ms = cfg80211_chandef_dfs_cac_time(&rdev->wiphy, &chandef);
        if (WARN_ON(!cac_time_ms))
                cac_time_ms = IEEE80211_DFS_MIN_CAC_TIME_MS;
@@ -5928,27 +5946,25 @@ skip_beacons:
        if (err)
                return err;
 
-       if (!cfg80211_reg_can_beacon(&rdev->wiphy, &params.chandef))
+       if (!cfg80211_reg_can_beacon(&rdev->wiphy, &params.chandef,
+                                    wdev->iftype))
                return -EINVAL;
 
-       switch (dev->ieee80211_ptr->iftype) {
-       case NL80211_IFTYPE_AP:
-       case NL80211_IFTYPE_P2P_GO:
-       case NL80211_IFTYPE_ADHOC:
-       case NL80211_IFTYPE_MESH_POINT:
-               err = cfg80211_chandef_dfs_required(wdev->wiphy,
-                                                   &params.chandef);
-               if (err < 0)
-                       return err;
-               if (err) {
-                       radar_detect_width = BIT(params.chandef.width);
-                       params.radar_required = true;
-               }
-               break;
-       default:
-               break;
+       err = cfg80211_chandef_dfs_required(wdev->wiphy,
+                                           &params.chandef,
+                                           wdev->iftype);
+       if (err < 0)
+               return err;
+
+       if (err > 0) {
+               radar_detect_width = BIT(params.chandef.width);
+               params.radar_required = true;
        }
 
+       /* TODO: I left this here for now.  With channel switch, the
+        * verification is a bit more complicated, because we only do
+        * it later when the channel switch really happens.
+        */
        err = cfg80211_can_use_iftype_chan(rdev, wdev, wdev->iftype,
                                           params.chandef.chan,
                                           CHAN_MODE_SHARED,
@@ -6175,12 +6191,12 @@ static int nl80211_dump_survey(struct sk_buff *skb,
                        struct netlink_callback *cb)
 {
        struct survey_info survey;
-       struct cfg80211_registered_device *dev;
+       struct cfg80211_registered_device *rdev;
        struct wireless_dev *wdev;
        int survey_idx = cb->args[2];
        int res;
 
-       res = nl80211_prepare_wdev_dump(skb, cb, &dev, &wdev);
+       res = nl80211_prepare_wdev_dump(skb, cb, &rdev, &wdev);
        if (res)
                return res;
 
@@ -6189,7 +6205,7 @@ static int nl80211_dump_survey(struct sk_buff *skb,
                goto out_err;
        }
 
-       if (!dev->ops->dump_survey) {
+       if (!rdev->ops->dump_survey) {
                res = -EOPNOTSUPP;
                goto out_err;
        }
@@ -6197,7 +6213,7 @@ static int nl80211_dump_survey(struct sk_buff *skb,
        while (1) {
                struct ieee80211_channel *chan;
 
-               res = rdev_dump_survey(dev, wdev->netdev, survey_idx, &survey);
+               res = rdev_dump_survey(rdev, wdev->netdev, survey_idx, &survey);
                if (res == -ENOENT)
                        break;
                if (res)
@@ -6209,7 +6225,7 @@ static int nl80211_dump_survey(struct sk_buff *skb,
                        goto out;
                }
 
-               chan = ieee80211_get_channel(&dev->wiphy,
+               chan = ieee80211_get_channel(&rdev->wiphy,
                                             survey.channel->center_freq);
                if (!chan || chan->flags & IEEE80211_CHAN_DISABLED) {
                        survey_idx++;
@@ -6228,7 +6244,7 @@ static int nl80211_dump_survey(struct sk_buff *skb,
        cb->args[2] = survey_idx;
        res = skb->len;
  out_err:
-       nl80211_finish_wdev_dump(dev);
+       nl80211_finish_wdev_dump(rdev);
        return res;
 }
 
@@ -6704,7 +6720,8 @@ static int nl80211_join_ibss(struct sk_buff *skb, struct genl_info *info)
        if (err)
                return err;
 
-       if (!cfg80211_reg_can_beacon(&rdev->wiphy, &ibss.chandef))
+       if (!cfg80211_reg_can_beacon(&rdev->wiphy, &ibss.chandef,
+                                    NL80211_IFTYPE_ADHOC))
                return -EINVAL;
 
        switch (ibss.chandef.width) {
@@ -6879,7 +6896,7 @@ struct sk_buff *__cfg80211_alloc_event_skb(struct wiphy *wiphy,
                                           int vendor_event_idx,
                                           int approxlen, gfp_t gfp)
 {
-       struct cfg80211_registered_device *rdev = wiphy_to_dev(wiphy);
+       struct cfg80211_registered_device *rdev = wiphy_to_rdev(wiphy);
        const struct nl80211_vendor_cmd_info *info;
 
        switch (cmd) {
@@ -8981,9 +8998,8 @@ static int nl80211_start_p2p_device(struct sk_buff *skb, struct genl_info *info)
        if (wdev->p2p_started)
                return 0;
 
-       err = cfg80211_can_add_interface(rdev, wdev->iftype);
-       if (err)
-               return err;
+       if (rfkill_blocked(rdev->rfkill))
+               return -ERFKILL;
 
        err = rdev_start_p2p_device(rdev, wdev);
        if (err)
@@ -9192,7 +9208,7 @@ struct sk_buff *__cfg80211_alloc_reply_skb(struct wiphy *wiphy,
                                           enum nl80211_attrs attr,
                                           int approxlen)
 {
-       struct cfg80211_registered_device *rdev = wiphy_to_dev(wiphy);
+       struct cfg80211_registered_device *rdev = wiphy_to_rdev(wiphy);
 
        if (WARN_ON(!rdev->cur_cmd_info))
                return NULL;
@@ -9316,7 +9332,7 @@ static int nl80211_pre_doit(const struct genl_ops *ops, struct sk_buff *skb,
                }
 
                dev = wdev->netdev;
-               rdev = wiphy_to_dev(wdev->wiphy);
+               rdev = wiphy_to_rdev(wdev->wiphy);
 
                if (ops->internal_flags & NL80211_FLAG_NEED_NETDEV) {
                        if (!dev) {
@@ -10345,7 +10361,7 @@ void cfg80211_rx_unprot_mlme_mgmt(struct net_device *dev, const u8 *buf,
 {
        struct wireless_dev *wdev = dev->ieee80211_ptr;
        struct wiphy *wiphy = wdev->wiphy;
-       struct cfg80211_registered_device *rdev = wiphy_to_dev(wiphy);
+       struct cfg80211_registered_device *rdev = wiphy_to_rdev(wiphy);
        const struct ieee80211_mgmt *mgmt = (void *)buf;
        u32 cmd;
 
@@ -10567,7 +10583,7 @@ void cfg80211_notify_new_peer_candidate(struct net_device *dev, const u8 *addr,
                                        const u8* ie, u8 ie_len, gfp_t gfp)
 {
        struct wireless_dev *wdev = dev->ieee80211_ptr;
-       struct cfg80211_registered_device *rdev = wiphy_to_dev(wdev->wiphy);
+       struct cfg80211_registered_device *rdev = wiphy_to_rdev(wdev->wiphy);
        struct sk_buff *msg;
        void *hdr;
 
@@ -10747,7 +10763,7 @@ void cfg80211_ready_on_channel(struct wireless_dev *wdev, u64 cookie,
                               unsigned int duration, gfp_t gfp)
 {
        struct wiphy *wiphy = wdev->wiphy;
-       struct cfg80211_registered_device *rdev = wiphy_to_dev(wiphy);
+       struct cfg80211_registered_device *rdev = wiphy_to_rdev(wiphy);
 
        trace_cfg80211_ready_on_channel(wdev, cookie, chan, duration);
        nl80211_send_remain_on_chan_event(NL80211_CMD_REMAIN_ON_CHANNEL,
@@ -10761,7 +10777,7 @@ void cfg80211_remain_on_channel_expired(struct wireless_dev *wdev, u64 cookie,
                                        gfp_t gfp)
 {
        struct wiphy *wiphy = wdev->wiphy;
-       struct cfg80211_registered_device *rdev = wiphy_to_dev(wiphy);
+       struct cfg80211_registered_device *rdev = wiphy_to_rdev(wiphy);
 
        trace_cfg80211_ready_on_channel_expired(wdev, cookie, chan);
        nl80211_send_remain_on_chan_event(NL80211_CMD_CANCEL_REMAIN_ON_CHANNEL,
@@ -10773,7 +10789,7 @@ void cfg80211_new_sta(struct net_device *dev, const u8 *mac_addr,
                      struct station_info *sinfo, gfp_t gfp)
 {
        struct wiphy *wiphy = dev->ieee80211_ptr->wiphy;
-       struct cfg80211_registered_device *rdev = wiphy_to_dev(wiphy);
+       struct cfg80211_registered_device *rdev = wiphy_to_rdev(wiphy);
        struct sk_buff *msg;
 
        trace_cfg80211_new_sta(dev, mac_addr, sinfo);
@@ -10796,7 +10812,7 @@ EXPORT_SYMBOL(cfg80211_new_sta);
 void cfg80211_del_sta(struct net_device *dev, const u8 *mac_addr, gfp_t gfp)
 {
        struct wiphy *wiphy = dev->ieee80211_ptr->wiphy;
-       struct cfg80211_registered_device *rdev = wiphy_to_dev(wiphy);
+       struct cfg80211_registered_device *rdev = wiphy_to_rdev(wiphy);
        struct sk_buff *msg;
        void *hdr;
 
@@ -10833,7 +10849,7 @@ void cfg80211_conn_failed(struct net_device *dev, const u8 *mac_addr,
                          gfp_t gfp)
 {
        struct wiphy *wiphy = dev->ieee80211_ptr->wiphy;
-       struct cfg80211_registered_device *rdev = wiphy_to_dev(wiphy);
+       struct cfg80211_registered_device *rdev = wiphy_to_rdev(wiphy);
        struct sk_buff *msg;
        void *hdr;
 
@@ -10868,7 +10884,7 @@ static bool __nl80211_unexpected_frame(struct net_device *dev, u8 cmd,
                                       const u8 *addr, gfp_t gfp)
 {
        struct wireless_dev *wdev = dev->ieee80211_ptr;
-       struct cfg80211_registered_device *rdev = wiphy_to_dev(wdev->wiphy);
+       struct cfg80211_registered_device *rdev = wiphy_to_rdev(wdev->wiphy);
        struct sk_buff *msg;
        void *hdr;
        u32 nlportid = ACCESS_ONCE(wdev->ap_unexpected_nlportid);
@@ -10988,7 +11004,7 @@ void cfg80211_mgmt_tx_status(struct wireless_dev *wdev, u64 cookie,
                             const u8 *buf, size_t len, bool ack, gfp_t gfp)
 {
        struct wiphy *wiphy = wdev->wiphy;
-       struct cfg80211_registered_device *rdev = wiphy_to_dev(wiphy);
+       struct cfg80211_registered_device *rdev = wiphy_to_rdev(wiphy);
        struct net_device *netdev = wdev->netdev;
        struct sk_buff *msg;
        void *hdr;
@@ -11032,7 +11048,7 @@ void cfg80211_cqm_rssi_notify(struct net_device *dev,
 {
        struct wireless_dev *wdev = dev->ieee80211_ptr;
        struct wiphy *wiphy = wdev->wiphy;
-       struct cfg80211_registered_device *rdev = wiphy_to_dev(wiphy);
+       struct cfg80211_registered_device *rdev = wiphy_to_rdev(wiphy);
        struct sk_buff *msg;
        struct nlattr *pinfoattr;
        void *hdr;
@@ -11124,7 +11140,7 @@ void cfg80211_gtk_rekey_notify(struct net_device *dev, const u8 *bssid,
 {
        struct wireless_dev *wdev = dev->ieee80211_ptr;
        struct wiphy *wiphy = wdev->wiphy;
-       struct cfg80211_registered_device *rdev = wiphy_to_dev(wiphy);
+       struct cfg80211_registered_device *rdev = wiphy_to_rdev(wiphy);
 
        trace_cfg80211_gtk_rekey_notify(dev, bssid);
        nl80211_gtk_rekey_notify(rdev, dev, bssid, replay_ctr, gfp);
@@ -11182,7 +11198,7 @@ void cfg80211_pmksa_candidate_notify(struct net_device *dev, int index,
 {
        struct wireless_dev *wdev = dev->ieee80211_ptr;
        struct wiphy *wiphy = wdev->wiphy;
-       struct cfg80211_registered_device *rdev = wiphy_to_dev(wiphy);
+       struct cfg80211_registered_device *rdev = wiphy_to_rdev(wiphy);
 
        trace_cfg80211_pmksa_candidate_notify(dev, index, bssid, preauth);
        nl80211_pmksa_candidate_notify(rdev, dev, index, bssid, preauth, gfp);
@@ -11229,7 +11245,7 @@ void cfg80211_ch_switch_notify(struct net_device *dev,
 {
        struct wireless_dev *wdev = dev->ieee80211_ptr;
        struct wiphy *wiphy = wdev->wiphy;
-       struct cfg80211_registered_device *rdev = wiphy_to_dev(wiphy);
+       struct cfg80211_registered_device *rdev = wiphy_to_rdev(wiphy);
 
        ASSERT_WDEV_LOCK(wdev);
 
@@ -11253,7 +11269,7 @@ void cfg80211_cqm_txe_notify(struct net_device *dev,
 {
        struct wireless_dev *wdev = dev->ieee80211_ptr;
        struct wiphy *wiphy = wdev->wiphy;
-       struct cfg80211_registered_device *rdev = wiphy_to_dev(wiphy);
+       struct cfg80211_registered_device *rdev = wiphy_to_rdev(wiphy);
        struct sk_buff *msg;
        struct nlattr *pinfoattr;
        void *hdr;
@@ -11353,7 +11369,7 @@ void cfg80211_cqm_pktloss_notify(struct net_device *dev,
 {
        struct wireless_dev *wdev = dev->ieee80211_ptr;
        struct wiphy *wiphy = wdev->wiphy;
-       struct cfg80211_registered_device *rdev = wiphy_to_dev(wiphy);
+       struct cfg80211_registered_device *rdev = wiphy_to_rdev(wiphy);
        struct sk_buff *msg;
        struct nlattr *pinfoattr;
        void *hdr;
@@ -11400,7 +11416,7 @@ void cfg80211_probe_status(struct net_device *dev, const u8 *addr,
                           u64 cookie, bool acked, gfp_t gfp)
 {
        struct wireless_dev *wdev = dev->ieee80211_ptr;
-       struct cfg80211_registered_device *rdev = wiphy_to_dev(wdev->wiphy);
+       struct cfg80211_registered_device *rdev = wiphy_to_rdev(wdev->wiphy);
        struct sk_buff *msg;
        void *hdr;
 
@@ -11440,7 +11456,7 @@ void cfg80211_report_obss_beacon(struct wiphy *wiphy,
                                 const u8 *frame, size_t len,
                                 int freq, int sig_dbm)
 {
-       struct cfg80211_registered_device *rdev = wiphy_to_dev(wiphy);
+       struct cfg80211_registered_device *rdev = wiphy_to_rdev(wiphy);
        struct sk_buff *msg;
        void *hdr;
        struct cfg80211_beacon_registration *reg;
@@ -11487,7 +11503,7 @@ void cfg80211_report_wowlan_wakeup(struct wireless_dev *wdev,
                                   struct cfg80211_wowlan_wakeup *wakeup,
                                   gfp_t gfp)
 {
-       struct cfg80211_registered_device *rdev = wiphy_to_dev(wdev->wiphy);
+       struct cfg80211_registered_device *rdev = wiphy_to_rdev(wdev->wiphy);
        struct sk_buff *msg;
        void *hdr;
        int size = 200;
@@ -11597,7 +11613,7 @@ void cfg80211_tdls_oper_request(struct net_device *dev, const u8 *peer,
                                u16 reason_code, gfp_t gfp)
 {
        struct wireless_dev *wdev = dev->ieee80211_ptr;
-       struct cfg80211_registered_device *rdev = wiphy_to_dev(wdev->wiphy);
+       struct cfg80211_registered_device *rdev = wiphy_to_rdev(wdev->wiphy);
        struct sk_buff *msg;
        void *hdr;
 
@@ -11649,9 +11665,15 @@ static int nl80211_netlink_notify(struct notifier_block * nb,
        rcu_read_lock();
 
        list_for_each_entry_rcu(rdev, &cfg80211_rdev_list, list) {
-               list_for_each_entry_rcu(wdev, &rdev->wdev_list, list)
+               bool schedule_destroy_work = false;
+
+               list_for_each_entry_rcu(wdev, &rdev->wdev_list, list) {
                        cfg80211_mlme_unregister_socket(wdev, notify->portid);
 
+                       if (wdev->owner_nlportid == notify->portid)
+                               schedule_destroy_work = true;
+               }
+
                spin_lock_bh(&rdev->beacon_registrations_lock);
                list_for_each_entry_safe(reg, tmp, &rdev->beacon_registrations,
                                         list) {
@@ -11662,11 +11684,24 @@ static int nl80211_netlink_notify(struct notifier_block * nb,
                        }
                }
                spin_unlock_bh(&rdev->beacon_registrations_lock);
+
+               if (schedule_destroy_work) {
+                       struct cfg80211_iface_destroy *destroy;
+
+                       destroy = kzalloc(sizeof(*destroy), GFP_ATOMIC);
+                       if (destroy) {
+                               destroy->nlportid = notify->portid;
+                               spin_lock(&rdev->destroy_list_lock);
+                               list_add(&destroy->list, &rdev->destroy_list);
+                               spin_unlock(&rdev->destroy_list_lock);
+                               schedule_work(&rdev->destroy_work);
+                       }
+               }
        }
 
        rcu_read_unlock();
 
-       return NOTIFY_DONE;
+       return NOTIFY_OK;
 }
 
 static struct notifier_block nl80211_netlink_notifier = {
@@ -11677,7 +11712,7 @@ void cfg80211_ft_event(struct net_device *netdev,
                       struct cfg80211_ft_event_params *ft_event)
 {
        struct wiphy *wiphy = netdev->ieee80211_ptr->wiphy;
-       struct cfg80211_registered_device *rdev = wiphy_to_dev(wiphy);
+       struct cfg80211_registered_device *rdev = wiphy_to_rdev(wiphy);
        struct sk_buff *msg;
        void *hdr;
 
@@ -11724,7 +11759,7 @@ void cfg80211_crit_proto_stopped(struct wireless_dev *wdev, gfp_t gfp)
        void *hdr;
        u32 nlportid;
 
-       rdev = wiphy_to_dev(wdev->wiphy);
+       rdev = wiphy_to_rdev(wdev->wiphy);
        if (!rdev->crit_proto_nlportid)
                return;
 
@@ -11759,7 +11794,7 @@ EXPORT_SYMBOL(cfg80211_crit_proto_stopped);
 void nl80211_send_ap_stopped(struct wireless_dev *wdev)
 {
        struct wiphy *wiphy = wdev->wiphy;
-       struct cfg80211_registered_device *rdev = wiphy_to_dev(wiphy);
+       struct cfg80211_registered_device *rdev = wiphy_to_rdev(wiphy);
        struct sk_buff *msg;
        void *hdr;
 
index 74d97d33c938e8250ef300c2c3fb82d2a39b63b6..00cdf73ba6c468093df5b6c4a8567bf160f1c1e4 100644 (file)
@@ -950,4 +950,17 @@ static inline int rdev_set_qos_map(struct cfg80211_registered_device *rdev,
        return ret;
 }
 
+static inline int
+rdev_set_ap_chanwidth(struct cfg80211_registered_device *rdev,
+                     struct net_device *dev, struct cfg80211_chan_def *chandef)
+{
+       int ret;
+
+       trace_rdev_set_ap_chanwidth(&rdev->wiphy, dev, chandef);
+       ret = rdev->ops->set_ap_chanwidth(&rdev->wiphy, dev, chandef);
+       trace_rdev_return_int(&rdev->wiphy, ret);
+
+       return ret;
+}
+
 #endif /* __CFG80211_RDEV_OPS */
index f59aaac586f8cf10905135324c3913646910a662..e78f532aaa5b386f862f2679c006382f983e4fe0 100644 (file)
 #define REG_DBG_PRINT(args...)
 #endif
 
+/**
+ * enum reg_request_treatment - regulatory request treatment
+ *
+ * @REG_REQ_OK: continue processing the regulatory request
+ * @REG_REQ_IGNORE: ignore the regulatory request
+ * @REG_REQ_INTERSECT: the regulatory domain resulting from this request should
+ *     be intersected with the current one.
+ * @REG_REQ_ALREADY_SET: the regulatory request will not change the current
+ *     regulatory settings, and no further processing is required.
+ * @REG_REQ_USER_HINT_HANDLED: a non-alpha2 user hint was handled and no
+ *     further processing is required, i.e., no need to update last_request
+ *     etc. This should be used for user hints that do not provide an alpha2
+ *     but some other type of regulatory hint, e.g., indoor operation.
+ */
 enum reg_request_treatment {
        REG_REQ_OK,
        REG_REQ_IGNORE,
        REG_REQ_INTERSECT,
        REG_REQ_ALREADY_SET,
+       REG_REQ_USER_HINT_HANDLED,
 };
 
 static struct regulatory_request core_request_world = {
@@ -106,6 +121,14 @@ const struct ieee80211_regdomain __rcu *cfg80211_regdomain;
  */
 static int reg_num_devs_support_basehint;
 
+/*
+ * State variable indicating if the platform on which the devices
+ * are attached is operating in an indoor environment. The state variable
+ * is relevant for all registered devices.
+ * (protected by RTNL)
+ */
+static bool reg_is_indoor;
+
 static const struct ieee80211_regdomain *get_cfg80211_regdom(void)
 {
        return rtnl_dereference(cfg80211_regdomain);
@@ -240,8 +263,16 @@ static char user_alpha2[2];
 module_param(ieee80211_regdom, charp, 0444);
 MODULE_PARM_DESC(ieee80211_regdom, "IEEE 802.11 regulatory domain code");
 
-static void reg_free_request(struct regulatory_request *lr)
+static void reg_free_request(struct regulatory_request *request)
 {
+       if (request != get_last_request())
+               kfree(request);
+}
+
+static void reg_free_last_request(void)
+{
+       struct regulatory_request *lr = get_last_request();
+
        if (lr != &core_request_world && lr)
                kfree_rcu(lr, rcu_head);
 }
@@ -254,7 +285,7 @@ static void reg_update_last_request(struct regulatory_request *request)
        if (lr == request)
                return;
 
-       reg_free_request(lr);
+       reg_free_last_request();
        rcu_assign_pointer(last_request, request);
 }
 
@@ -873,6 +904,8 @@ static u32 map_regdom_flags(u32 rd_flags)
                channel_flags |= IEEE80211_CHAN_RADAR;
        if (rd_flags & NL80211_RRF_NO_OFDM)
                channel_flags |= IEEE80211_CHAN_NO_OFDM;
+       if (rd_flags & NL80211_RRF_NO_OUTDOOR)
+               channel_flags |= IEEE80211_CHAN_INDOOR_ONLY;
        return channel_flags;
 }
 
@@ -902,7 +935,7 @@ freq_reg_info_regd(struct wiphy *wiphy, u32 center_freq,
                if (!band_rule_found)
                        band_rule_found = freq_in_rule_band(fr, center_freq);
 
-               bw_fits = reg_does_bw_fit(fr, center_freq, MHZ_TO_KHZ(20));
+               bw_fits = reg_does_bw_fit(fr, center_freq, MHZ_TO_KHZ(5));
 
                if (band_rule_found && bw_fits)
                        return rr;
@@ -986,10 +1019,10 @@ static void chan_reg_rule_print_dbg(const struct ieee80211_regdomain *regd,
 }
 #endif
 
-/*
- * Note that right now we assume the desired channel bandwidth
- * is always 20 MHz for each individual channel (HT40 uses 20 MHz
- * per channel, the primary and the extension channel).
+/* Find an ieee80211_reg_rule such that a 5MHz channel with frequency
+ * chan->center_freq fits there.
+ * If there is no such reg_rule, disable the channel, otherwise set the
+ * flags corresponding to the bandwidths allowed in the particular reg_rule
  */
 static void handle_channel(struct wiphy *wiphy,
                           enum nl80211_reg_initiator initiator,
@@ -1050,8 +1083,12 @@ static void handle_channel(struct wiphy *wiphy,
        if (reg_rule->flags & NL80211_RRF_AUTO_BW)
                max_bandwidth_khz = reg_get_max_bandwidth(regd, reg_rule);
 
+       if (max_bandwidth_khz < MHZ_TO_KHZ(10))
+               bw_flags = IEEE80211_CHAN_NO_10MHZ;
+       if (max_bandwidth_khz < MHZ_TO_KHZ(20))
+               bw_flags |= IEEE80211_CHAN_NO_20MHZ;
        if (max_bandwidth_khz < MHZ_TO_KHZ(40))
-               bw_flags = IEEE80211_CHAN_NO_HT40;
+               bw_flags |= IEEE80211_CHAN_NO_HT40;
        if (max_bandwidth_khz < MHZ_TO_KHZ(80))
                bw_flags |= IEEE80211_CHAN_NO_80MHZ;
        if (max_bandwidth_khz < MHZ_TO_KHZ(160))
@@ -1071,6 +1108,13 @@ static void handle_channel(struct wiphy *wiphy,
                        (int) MBI_TO_DBI(power_rule->max_antenna_gain);
                chan->max_reg_power = chan->max_power = chan->orig_mpwr =
                        (int) MBM_TO_DBM(power_rule->max_eirp);
+
+               if (chan->flags & IEEE80211_CHAN_RADAR) {
+                       chan->dfs_cac_ms = IEEE80211_DFS_MIN_CAC_TIME_MS;
+                       if (reg_rule->dfs_cac_ms)
+                               chan->dfs_cac_ms = reg_rule->dfs_cac_ms;
+               }
+
                return;
        }
 
@@ -1126,12 +1170,19 @@ static bool reg_request_cell_base(struct regulatory_request *request)
        return request->user_reg_hint_type == NL80211_USER_REG_HINT_CELL_BASE;
 }
 
+static bool reg_request_indoor(struct regulatory_request *request)
+{
+       if (request->initiator != NL80211_REGDOM_SET_BY_USER)
+               return false;
+       return request->user_reg_hint_type == NL80211_USER_REG_HINT_INDOOR;
+}
+
 bool reg_last_request_cell_base(void)
 {
        return reg_request_cell_base(get_last_request());
 }
 
-#ifdef CONFIG_CFG80211_CERTIFICATION_ONUS
+#ifdef CONFIG_CFG80211_REG_CELLULAR_HINTS
 /* Core specific check */
 static enum reg_request_treatment
 reg_ignore_cell_hint(struct regulatory_request *pending_request)
@@ -1471,8 +1522,12 @@ static void handle_channel_custom(struct wiphy *wiphy,
        if (reg_rule->flags & NL80211_RRF_AUTO_BW)
                max_bandwidth_khz = reg_get_max_bandwidth(regd, reg_rule);
 
+       if (max_bandwidth_khz < MHZ_TO_KHZ(10))
+               bw_flags = IEEE80211_CHAN_NO_10MHZ;
+       if (max_bandwidth_khz < MHZ_TO_KHZ(20))
+               bw_flags |= IEEE80211_CHAN_NO_20MHZ;
        if (max_bandwidth_khz < MHZ_TO_KHZ(40))
-               bw_flags = IEEE80211_CHAN_NO_HT40;
+               bw_flags |= IEEE80211_CHAN_NO_HT40;
        if (max_bandwidth_khz < MHZ_TO_KHZ(80))
                bw_flags |= IEEE80211_CHAN_NO_80MHZ;
        if (max_bandwidth_khz < MHZ_TO_KHZ(160))
@@ -1568,6 +1623,11 @@ __reg_process_hint_user(struct regulatory_request *user_request)
 {
        struct regulatory_request *lr = get_last_request();
 
+       if (reg_request_indoor(user_request)) {
+               reg_is_indoor = true;
+               return REG_REQ_USER_HINT_HANDLED;
+       }
+
        if (reg_request_cell_base(user_request))
                return reg_ignore_cell_hint(user_request);
 
@@ -1615,8 +1675,9 @@ reg_process_hint_user(struct regulatory_request *user_request)
 
        treatment = __reg_process_hint_user(user_request);
        if (treatment == REG_REQ_IGNORE ||
-           treatment == REG_REQ_ALREADY_SET) {
-               kfree(user_request);
+           treatment == REG_REQ_ALREADY_SET ||
+           treatment == REG_REQ_USER_HINT_HANDLED) {
+               reg_free_request(user_request);
                return treatment;
        }
 
@@ -1676,14 +1737,15 @@ reg_process_hint_driver(struct wiphy *wiphy,
        case REG_REQ_OK:
                break;
        case REG_REQ_IGNORE:
-               kfree(driver_request);
+       case REG_REQ_USER_HINT_HANDLED:
+               reg_free_request(driver_request);
                return treatment;
        case REG_REQ_INTERSECT:
                /* fall through */
        case REG_REQ_ALREADY_SET:
                regd = reg_copy_regd(get_cfg80211_regdom());
                if (IS_ERR(regd)) {
-                       kfree(driver_request);
+                       reg_free_request(driver_request);
                        return REG_REQ_IGNORE;
                }
                rcu_assign_pointer(wiphy->regd, regd);
@@ -1775,12 +1837,13 @@ reg_process_hint_country_ie(struct wiphy *wiphy,
        case REG_REQ_OK:
                break;
        case REG_REQ_IGNORE:
+       case REG_REQ_USER_HINT_HANDLED:
                /* fall through */
        case REG_REQ_ALREADY_SET:
-               kfree(country_ie_request);
+               reg_free_request(country_ie_request);
                return treatment;
        case REG_REQ_INTERSECT:
-               kfree(country_ie_request);
+               reg_free_request(country_ie_request);
                /*
                 * This doesn't happen yet, not sure we
                 * ever want to support it for this case.
@@ -1841,7 +1904,7 @@ static void reg_process_hint(struct regulatory_request *reg_request)
        return;
 
 out_free:
-       kfree(reg_request);
+       reg_free_request(reg_request);
 }
 
 /*
@@ -1857,7 +1920,7 @@ static void reg_process_pending_hints(void)
 
        /* When last_request->processed becomes true this will be rescheduled */
        if (lr && !lr->processed) {
-               REG_DBG_PRINT("Pending regulatory request, waiting for it to be processed...\n");
+               reg_process_hint(lr);
                return;
        }
 
@@ -1967,6 +2030,22 @@ int regulatory_hint_user(const char *alpha2,
        return 0;
 }
 
+int regulatory_hint_indoor_user(void)
+{
+       struct regulatory_request *request;
+
+       request = kzalloc(sizeof(struct regulatory_request), GFP_KERNEL);
+       if (!request)
+               return -ENOMEM;
+
+       request->wiphy_idx = WIPHY_IDX_INVALID;
+       request->initiator = NL80211_REGDOM_SET_BY_USER;
+       request->user_reg_hint_type = NL80211_USER_REG_HINT_INDOOR;
+       queue_regulatory_request(request);
+
+       return 0;
+}
+
 /* Driver hints */
 int regulatory_hint(struct wiphy *wiphy, const char *alpha2)
 {
@@ -2134,6 +2213,8 @@ static void restore_regulatory_settings(bool reset_user)
 
        ASSERT_RTNL();
 
+       reg_is_indoor = false;
+
        reset_regdomains(true, &world_regdom);
        restore_alpha2(alpha2, reset_user);
 
@@ -2594,7 +2675,7 @@ void wiphy_regulatory_deregister(struct wiphy *wiphy)
                reg_num_devs_support_basehint--;
 
        rcu_free_regdom(get_wiphy_regdom(wiphy));
-       rcu_assign_pointer(wiphy->regd, NULL);
+       RCU_INIT_POINTER(wiphy->regd, NULL);
 
        if (lr)
                request_wiphy = wiphy_idx_to_wiphy(lr->wiphy_idx);
@@ -2614,6 +2695,40 @@ static void reg_timeout_work(struct work_struct *work)
        rtnl_unlock();
 }
 
+/*
+ * See http://www.fcc.gov/document/5-ghz-unlicensed-spectrum-unii, for
+ * UNII band definitions
+ */
+int cfg80211_get_unii(int freq)
+{
+       /* UNII-1 */
+       if (freq >= 5150 && freq <= 5250)
+               return 0;
+
+       /* UNII-2A */
+       if (freq > 5250 && freq <= 5350)
+               return 1;
+
+       /* UNII-2B */
+       if (freq > 5350 && freq <= 5470)
+               return 2;
+
+       /* UNII-2C */
+       if (freq > 5470 && freq <= 5725)
+               return 3;
+
+       /* UNII-3 */
+       if (freq > 5725 && freq <= 5825)
+               return 4;
+
+       return -EINVAL;
+}
+
+bool regulatory_indoor_allowed(void)
+{
+       return reg_is_indoor;
+}
+
 int __init regulatory_init(void)
 {
        int err = 0;
index 37c180df34b72a1195aacb6d72b7b07ddc44a9ef..5e48031ccb9afc33a41c36221e3bef924625ebe8 100644 (file)
@@ -25,6 +25,7 @@ enum nl80211_dfs_regions reg_get_dfs_region(struct wiphy *wiphy);
 
 int regulatory_hint_user(const char *alpha2,
                         enum nl80211_user_reg_hint_type user_reg_hint_type);
+int regulatory_hint_indoor_user(void);
 
 void wiphy_regulatory_register(struct wiphy *wiphy);
 void wiphy_regulatory_deregister(struct wiphy *wiphy);
@@ -104,4 +105,21 @@ void regulatory_hint_country_ie(struct wiphy *wiphy,
  */
 void regulatory_hint_disconnect(void);
 
+/**
+ * cfg80211_get_unii - get the U-NII band for the frequency
+ * @freq: the frequency for which we want to get the UNII band.
+ *
+ * Get a value specifying the U-NII band frequency belongs to.
+ * U-NII bands are defined by the FCC in C.F.R 47 part 15.
+ *
+ * Returns -EINVAL if freq is invalid, 0 for UNII-1, 1 for UNII-2A,
+ * 2 for UNII-2B, 3 for UNII-2C and 4 for UNII-3.
+ */
+int cfg80211_get_unii(int freq);
+
+/**
+ * regulatory_indoor_allowed - is indoor operation allowed
+ */
+bool regulatory_indoor_allowed(void);
+
 #endif  /* __NET_WIRELESS_REG_H */
index 7d09a712cb1f1353f13310f5c68b38e750d199a6..e7329bb6a323c95d0a3297f9fc928780f9bc7f66 100644 (file)
@@ -81,10 +81,10 @@ static void bss_free(struct cfg80211_internal_bss *bss)
        kfree(bss);
 }
 
-static inline void bss_ref_get(struct cfg80211_registered_device *dev,
+static inline void bss_ref_get(struct cfg80211_registered_device *rdev,
                               struct cfg80211_internal_bss *bss)
 {
-       lockdep_assert_held(&dev->bss_lock);
+       lockdep_assert_held(&rdev->bss_lock);
 
        bss->refcount++;
        if (bss->pub.hidden_beacon_bss) {
@@ -95,10 +95,10 @@ static inline void bss_ref_get(struct cfg80211_registered_device *dev,
        }
 }
 
-static inline void bss_ref_put(struct cfg80211_registered_device *dev,
+static inline void bss_ref_put(struct cfg80211_registered_device *rdev,
                               struct cfg80211_internal_bss *bss)
 {
-       lockdep_assert_held(&dev->bss_lock);
+       lockdep_assert_held(&rdev->bss_lock);
 
        if (bss->pub.hidden_beacon_bss) {
                struct cfg80211_internal_bss *hbss;
@@ -114,10 +114,10 @@ static inline void bss_ref_put(struct cfg80211_registered_device *dev,
                bss_free(bss);
 }
 
-static bool __cfg80211_unlink_bss(struct cfg80211_registered_device *dev,
+static bool __cfg80211_unlink_bss(struct cfg80211_registered_device *rdev,
                                  struct cfg80211_internal_bss *bss)
 {
-       lockdep_assert_held(&dev->bss_lock);
+       lockdep_assert_held(&rdev->bss_lock);
 
        if (!list_empty(&bss->hidden_list)) {
                /*
@@ -134,31 +134,31 @@ static bool __cfg80211_unlink_bss(struct cfg80211_registered_device *dev,
        }
 
        list_del_init(&bss->list);
-       rb_erase(&bss->rbn, &dev->bss_tree);
-       bss_ref_put(dev, bss);
+       rb_erase(&bss->rbn, &rdev->bss_tree);
+       bss_ref_put(rdev, bss);
        return true;
 }
 
-static void __cfg80211_bss_expire(struct cfg80211_registered_device *dev,
+static void __cfg80211_bss_expire(struct cfg80211_registered_device *rdev,
                                  unsigned long expire_time)
 {
        struct cfg80211_internal_bss *bss, *tmp;
        bool expired = false;
 
-       lockdep_assert_held(&dev->bss_lock);
+       lockdep_assert_held(&rdev->bss_lock);
 
-       list_for_each_entry_safe(bss, tmp, &dev->bss_list, list) {
+       list_for_each_entry_safe(bss, tmp, &rdev->bss_list, list) {
                if (atomic_read(&bss->hold))
                        continue;
                if (!time_after(expire_time, bss->ts))
                        continue;
 
-               if (__cfg80211_unlink_bss(dev, bss))
+               if (__cfg80211_unlink_bss(rdev, bss))
                        expired = true;
        }
 
        if (expired)
-               dev->bss_generation++;
+               rdev->bss_generation++;
 }
 
 void ___cfg80211_scan_done(struct cfg80211_registered_device *rdev,
@@ -238,11 +238,11 @@ void __cfg80211_scan_done(struct work_struct *wk)
 void cfg80211_scan_done(struct cfg80211_scan_request *request, bool aborted)
 {
        trace_cfg80211_scan_done(request, aborted);
-       WARN_ON(request != wiphy_to_dev(request->wiphy)->scan_req);
+       WARN_ON(request != wiphy_to_rdev(request->wiphy)->scan_req);
 
        request->aborted = aborted;
        request->notified = true;
-       queue_work(cfg80211_wq, &wiphy_to_dev(request->wiphy)->scan_done_wk);
+       queue_work(cfg80211_wq, &wiphy_to_rdev(request->wiphy)->scan_done_wk);
 }
 EXPORT_SYMBOL(cfg80211_scan_done);
 
@@ -278,20 +278,28 @@ void cfg80211_sched_scan_results(struct wiphy *wiphy)
 {
        trace_cfg80211_sched_scan_results(wiphy);
        /* ignore if we're not scanning */
-       if (wiphy_to_dev(wiphy)->sched_scan_req)
+       if (wiphy_to_rdev(wiphy)->sched_scan_req)
                queue_work(cfg80211_wq,
-                          &wiphy_to_dev(wiphy)->sched_scan_results_wk);
+                          &wiphy_to_rdev(wiphy)->sched_scan_results_wk);
 }
 EXPORT_SYMBOL(cfg80211_sched_scan_results);
 
-void cfg80211_sched_scan_stopped(struct wiphy *wiphy)
+void cfg80211_sched_scan_stopped_rtnl(struct wiphy *wiphy)
 {
-       struct cfg80211_registered_device *rdev = wiphy_to_dev(wiphy);
+       struct cfg80211_registered_device *rdev = wiphy_to_rdev(wiphy);
+
+       ASSERT_RTNL();
 
        trace_cfg80211_sched_scan_stopped(wiphy);
 
-       rtnl_lock();
        __cfg80211_stop_sched_scan(rdev, true);
+}
+EXPORT_SYMBOL(cfg80211_sched_scan_stopped_rtnl);
+
+void cfg80211_sched_scan_stopped(struct wiphy *wiphy)
+{
+       rtnl_lock();
+       cfg80211_sched_scan_stopped_rtnl(wiphy);
        rtnl_unlock();
 }
 EXPORT_SYMBOL(cfg80211_sched_scan_stopped);
@@ -322,21 +330,21 @@ int __cfg80211_stop_sched_scan(struct cfg80211_registered_device *rdev,
        return 0;
 }
 
-void cfg80211_bss_age(struct cfg80211_registered_device *dev,
+void cfg80211_bss_age(struct cfg80211_registered_device *rdev,
                       unsigned long age_secs)
 {
        struct cfg80211_internal_bss *bss;
        unsigned long age_jiffies = msecs_to_jiffies(age_secs * MSEC_PER_SEC);
 
-       spin_lock_bh(&dev->bss_lock);
-       list_for_each_entry(bss, &dev->bss_list, list)
+       spin_lock_bh(&rdev->bss_lock);
+       list_for_each_entry(bss, &rdev->bss_list, list)
                bss->ts -= age_jiffies;
-       spin_unlock_bh(&dev->bss_lock);
+       spin_unlock_bh(&rdev->bss_lock);
 }
 
-void cfg80211_bss_expire(struct cfg80211_registered_device *dev)
+void cfg80211_bss_expire(struct cfg80211_registered_device *rdev)
 {
-       __cfg80211_bss_expire(dev, jiffies - IEEE80211_SCAN_RESULT_EXPIRE);
+       __cfg80211_bss_expire(rdev, jiffies - IEEE80211_SCAN_RESULT_EXPIRE);
 }
 
 const u8 *cfg80211_find_ie(u8 eid, const u8 *ies, int len)
@@ -526,32 +534,34 @@ struct cfg80211_bss *cfg80211_get_bss(struct wiphy *wiphy,
                                      const u8 *ssid, size_t ssid_len,
                                      u16 capa_mask, u16 capa_val)
 {
-       struct cfg80211_registered_device *dev = wiphy_to_dev(wiphy);
+       struct cfg80211_registered_device *rdev = wiphy_to_rdev(wiphy);
        struct cfg80211_internal_bss *bss, *res = NULL;
        unsigned long now = jiffies;
 
        trace_cfg80211_get_bss(wiphy, channel, bssid, ssid, ssid_len, capa_mask,
                               capa_val);
 
-       spin_lock_bh(&dev->bss_lock);
+       spin_lock_bh(&rdev->bss_lock);
 
-       list_for_each_entry(bss, &dev->bss_list, list) {
+       list_for_each_entry(bss, &rdev->bss_list, list) {
                if ((bss->pub.capability & capa_mask) != capa_val)
                        continue;
                if (channel && bss->pub.channel != channel)
                        continue;
+               if (!is_valid_ether_addr(bss->pub.bssid))
+                       continue;
                /* Don't get expired BSS structs */
                if (time_after(now, bss->ts + IEEE80211_SCAN_RESULT_EXPIRE) &&
                    !atomic_read(&bss->hold))
                        continue;
                if (is_bss(&bss->pub, bssid, ssid, ssid_len)) {
                        res = bss;
-                       bss_ref_get(dev, res);
+                       bss_ref_get(rdev, res);
                        break;
                }
        }
 
-       spin_unlock_bh(&dev->bss_lock);
+       spin_unlock_bh(&rdev->bss_lock);
        if (!res)
                return NULL;
        trace_cfg80211_return_bss(&res->pub);
@@ -559,10 +569,10 @@ struct cfg80211_bss *cfg80211_get_bss(struct wiphy *wiphy,
 }
 EXPORT_SYMBOL(cfg80211_get_bss);
 
-static void rb_insert_bss(struct cfg80211_registered_device *dev,
+static void rb_insert_bss(struct cfg80211_registered_device *rdev,
                          struct cfg80211_internal_bss *bss)
 {
-       struct rb_node **p = &dev->bss_tree.rb_node;
+       struct rb_node **p = &rdev->bss_tree.rb_node;
        struct rb_node *parent = NULL;
        struct cfg80211_internal_bss *tbss;
        int cmp;
@@ -585,15 +595,15 @@ static void rb_insert_bss(struct cfg80211_registered_device *dev,
        }
 
        rb_link_node(&bss->rbn, parent, p);
-       rb_insert_color(&bss->rbn, &dev->bss_tree);
+       rb_insert_color(&bss->rbn, &rdev->bss_tree);
 }
 
 static struct cfg80211_internal_bss *
-rb_find_bss(struct cfg80211_registered_device *dev,
+rb_find_bss(struct cfg80211_registered_device *rdev,
            struct cfg80211_internal_bss *res,
            enum bss_compare_mode mode)
 {
-       struct rb_node *n = dev->bss_tree.rb_node;
+       struct rb_node *n = rdev->bss_tree.rb_node;
        struct cfg80211_internal_bss *bss;
        int r;
 
@@ -612,7 +622,7 @@ rb_find_bss(struct cfg80211_registered_device *dev,
        return NULL;
 }
 
-static bool cfg80211_combine_bsses(struct cfg80211_registered_device *dev,
+static bool cfg80211_combine_bsses(struct cfg80211_registered_device *rdev,
                                   struct cfg80211_internal_bss *new)
 {
        const struct cfg80211_bss_ies *ies;
@@ -642,7 +652,7 @@ static bool cfg80211_combine_bsses(struct cfg80211_registered_device *dev,
 
        /* This is the bad part ... */
 
-       list_for_each_entry(bss, &dev->bss_list, list) {
+       list_for_each_entry(bss, &rdev->bss_list, list) {
                if (!ether_addr_equal(bss->pub.bssid, new->pub.bssid))
                        continue;
                if (bss->pub.channel != new->pub.channel)
@@ -676,7 +686,7 @@ static bool cfg80211_combine_bsses(struct cfg80211_registered_device *dev,
 
 /* Returned bss is reference counted and must be cleaned up appropriately. */
 static struct cfg80211_internal_bss *
-cfg80211_bss_update(struct cfg80211_registered_device *dev,
+cfg80211_bss_update(struct cfg80211_registered_device *rdev,
                    struct cfg80211_internal_bss *tmp,
                    bool signal_valid)
 {
@@ -687,14 +697,14 @@ cfg80211_bss_update(struct cfg80211_registered_device *dev,
 
        tmp->ts = jiffies;
 
-       spin_lock_bh(&dev->bss_lock);
+       spin_lock_bh(&rdev->bss_lock);
 
        if (WARN_ON(!rcu_access_pointer(tmp->pub.ies))) {
-               spin_unlock_bh(&dev->bss_lock);
+               spin_unlock_bh(&rdev->bss_lock);
                return NULL;
        }
 
-       found = rb_find_bss(dev, tmp, BSS_CMP_REGULAR);
+       found = rb_find_bss(rdev, tmp, BSS_CMP_REGULAR);
 
        if (found) {
                /* Update IEs */
@@ -781,7 +791,7 @@ cfg80211_bss_update(struct cfg80211_registered_device *dev,
                 * is allocated on the stack since it's not needed in the
                 * more common case of an update
                 */
-               new = kzalloc(sizeof(*new) + dev->wiphy.bss_priv_size,
+               new = kzalloc(sizeof(*new) + rdev->wiphy.bss_priv_size,
                              GFP_ATOMIC);
                if (!new) {
                        ies = (void *)rcu_dereference(tmp->pub.beacon_ies);
@@ -797,9 +807,9 @@ cfg80211_bss_update(struct cfg80211_registered_device *dev,
                INIT_LIST_HEAD(&new->hidden_list);
 
                if (rcu_access_pointer(tmp->pub.proberesp_ies)) {
-                       hidden = rb_find_bss(dev, tmp, BSS_CMP_HIDE_ZLEN);
+                       hidden = rb_find_bss(rdev, tmp, BSS_CMP_HIDE_ZLEN);
                        if (!hidden)
-                               hidden = rb_find_bss(dev, tmp,
+                               hidden = rb_find_bss(rdev, tmp,
                                                     BSS_CMP_HIDE_NUL);
                        if (hidden) {
                                new->pub.hidden_beacon_bss = &hidden->pub;
@@ -816,24 +826,24 @@ cfg80211_bss_update(struct cfg80211_registered_device *dev,
                         * expensive search for any probe responses that should
                         * be grouped with this beacon for updates ...
                         */
-                       if (!cfg80211_combine_bsses(dev, new)) {
+                       if (!cfg80211_combine_bsses(rdev, new)) {
                                kfree(new);
                                goto drop;
                        }
                }
 
-               list_add_tail(&new->list, &dev->bss_list);
-               rb_insert_bss(dev, new);
+               list_add_tail(&new->list, &rdev->bss_list);
+               rb_insert_bss(rdev, new);
                found = new;
        }
 
-       dev->bss_generation++;
-       bss_ref_get(dev, found);
-       spin_unlock_bh(&dev->bss_lock);
+       rdev->bss_generation++;
+       bss_ref_get(rdev, found);
+       spin_unlock_bh(&rdev->bss_lock);
 
        return found;
  drop:
-       spin_unlock_bh(&dev->bss_lock);
+       spin_unlock_bh(&rdev->bss_lock);
        return NULL;
 }
 
@@ -917,7 +927,7 @@ cfg80211_inform_bss_width(struct wiphy *wiphy,
        rcu_assign_pointer(tmp.pub.beacon_ies, ies);
        rcu_assign_pointer(tmp.pub.ies, ies);
 
-       res = cfg80211_bss_update(wiphy_to_dev(wiphy), &tmp,
+       res = cfg80211_bss_update(wiphy_to_rdev(wiphy), &tmp,
                                  rx_channel == channel);
        if (!res)
                return NULL;
@@ -989,7 +999,7 @@ cfg80211_inform_bss_width_frame(struct wiphy *wiphy,
        tmp.pub.beacon_interval = le16_to_cpu(mgmt->u.probe_resp.beacon_int);
        tmp.pub.capability = le16_to_cpu(mgmt->u.probe_resp.capab_info);
 
-       res = cfg80211_bss_update(wiphy_to_dev(wiphy), &tmp,
+       res = cfg80211_bss_update(wiphy_to_rdev(wiphy), &tmp,
                                  rx_channel == channel);
        if (!res)
                return NULL;
@@ -1005,7 +1015,7 @@ EXPORT_SYMBOL(cfg80211_inform_bss_width_frame);
 
 void cfg80211_ref_bss(struct wiphy *wiphy, struct cfg80211_bss *pub)
 {
-       struct cfg80211_registered_device *dev = wiphy_to_dev(wiphy);
+       struct cfg80211_registered_device *rdev = wiphy_to_rdev(wiphy);
        struct cfg80211_internal_bss *bss;
 
        if (!pub)
@@ -1013,15 +1023,15 @@ void cfg80211_ref_bss(struct wiphy *wiphy, struct cfg80211_bss *pub)
 
        bss = container_of(pub, struct cfg80211_internal_bss, pub);
 
-       spin_lock_bh(&dev->bss_lock);
-       bss_ref_get(dev, bss);
-       spin_unlock_bh(&dev->bss_lock);
+       spin_lock_bh(&rdev->bss_lock);
+       bss_ref_get(rdev, bss);
+       spin_unlock_bh(&rdev->bss_lock);
 }
 EXPORT_SYMBOL(cfg80211_ref_bss);
 
 void cfg80211_put_bss(struct wiphy *wiphy, struct cfg80211_bss *pub)
 {
-       struct cfg80211_registered_device *dev = wiphy_to_dev(wiphy);
+       struct cfg80211_registered_device *rdev = wiphy_to_rdev(wiphy);
        struct cfg80211_internal_bss *bss;
 
        if (!pub)
@@ -1029,15 +1039,15 @@ void cfg80211_put_bss(struct wiphy *wiphy, struct cfg80211_bss *pub)
 
        bss = container_of(pub, struct cfg80211_internal_bss, pub);
 
-       spin_lock_bh(&dev->bss_lock);
-       bss_ref_put(dev, bss);
-       spin_unlock_bh(&dev->bss_lock);
+       spin_lock_bh(&rdev->bss_lock);
+       bss_ref_put(rdev, bss);
+       spin_unlock_bh(&rdev->bss_lock);
 }
 EXPORT_SYMBOL(cfg80211_put_bss);
 
 void cfg80211_unlink_bss(struct wiphy *wiphy, struct cfg80211_bss *pub)
 {
-       struct cfg80211_registered_device *dev = wiphy_to_dev(wiphy);
+       struct cfg80211_registered_device *rdev = wiphy_to_rdev(wiphy);
        struct cfg80211_internal_bss *bss;
 
        if (WARN_ON(!pub))
@@ -1045,12 +1055,12 @@ void cfg80211_unlink_bss(struct wiphy *wiphy, struct cfg80211_bss *pub)
 
        bss = container_of(pub, struct cfg80211_internal_bss, pub);
 
-       spin_lock_bh(&dev->bss_lock);
+       spin_lock_bh(&rdev->bss_lock);
        if (!list_empty(&bss->list)) {
-               if (__cfg80211_unlink_bss(dev, bss))
-                       dev->bss_generation++;
+               if (__cfg80211_unlink_bss(rdev, bss))
+                       rdev->bss_generation++;
        }
-       spin_unlock_bh(&dev->bss_lock);
+       spin_unlock_bh(&rdev->bss_lock);
 }
 EXPORT_SYMBOL(cfg80211_unlink_bss);
 
@@ -1067,7 +1077,7 @@ cfg80211_get_dev_from_ifindex(struct net *net, int ifindex)
        if (!dev)
                return ERR_PTR(-ENODEV);
        if (dev->ieee80211_ptr)
-               rdev = wiphy_to_dev(dev->ieee80211_ptr->wiphy);
+               rdev = wiphy_to_rdev(dev->ieee80211_ptr->wiphy);
        else
                rdev = ERR_PTR(-ENODEV);
        dev_put(dev);
@@ -1147,7 +1157,11 @@ int cfg80211_wext_siwscan(struct net_device *dev,
                                int k;
                                int wiphy_freq = wiphy->bands[band]->channels[j].center_freq;
                                for (k = 0; k < wreq->num_channels; k++) {
-                                       int wext_freq = cfg80211_wext_freq(wiphy, &wreq->channel_list[k]);
+                                       struct iw_freq *freq =
+                                               &wreq->channel_list[k];
+                                       int wext_freq =
+                                               cfg80211_wext_freq(freq);
+
                                        if (wext_freq == wiphy_freq)
                                                goto wext_freq_found;
                                }
@@ -1459,7 +1473,7 @@ ieee80211_bss(struct wiphy *wiphy, struct iw_request_info *info,
 }
 
 
-static int ieee80211_scan_results(struct cfg80211_registered_device *dev,
+static int ieee80211_scan_results(struct cfg80211_registered_device *rdev,
                                  struct iw_request_info *info,
                                  char *buf, size_t len)
 {
@@ -1467,18 +1481,18 @@ static int ieee80211_scan_results(struct cfg80211_registered_device *dev,
        char *end_buf = buf + len;
        struct cfg80211_internal_bss *bss;
 
-       spin_lock_bh(&dev->bss_lock);
-       cfg80211_bss_expire(dev);
+       spin_lock_bh(&rdev->bss_lock);
+       cfg80211_bss_expire(rdev);
 
-       list_for_each_entry(bss, &dev->bss_list, list) {
+       list_for_each_entry(bss, &rdev->bss_list, list) {
                if (buf + len - current_ev <= IW_EV_ADDR_LEN) {
-                       spin_unlock_bh(&dev->bss_lock);
+                       spin_unlock_bh(&rdev->bss_lock);
                        return -E2BIG;
                }
-               current_ev = ieee80211_bss(&dev->wiphy, info, bss,
+               current_ev = ieee80211_bss(&rdev->wiphy, info, bss,
                                           current_ev, end_buf);
        }
-       spin_unlock_bh(&dev->bss_lock);
+       spin_unlock_bh(&rdev->bss_lock);
        return current_ev - buf;
 }
 
index acdcb4a81817b7c78e8e721ff632284b9b806fa9..0c0844b585d1e833a39a132b4339479d35a517cd 100644 (file)
@@ -59,7 +59,7 @@ static void cfg80211_sme_free(struct wireless_dev *wdev)
 
 static int cfg80211_conn_scan(struct wireless_dev *wdev)
 {
-       struct cfg80211_registered_device *rdev = wiphy_to_dev(wdev->wiphy);
+       struct cfg80211_registered_device *rdev = wiphy_to_rdev(wdev->wiphy);
        struct cfg80211_scan_request *request;
        int n_channels, err;
 
@@ -130,7 +130,7 @@ static int cfg80211_conn_scan(struct wireless_dev *wdev)
 
 static int cfg80211_conn_do_work(struct wireless_dev *wdev)
 {
-       struct cfg80211_registered_device *rdev = wiphy_to_dev(wdev->wiphy);
+       struct cfg80211_registered_device *rdev = wiphy_to_rdev(wdev->wiphy);
        struct cfg80211_connect_params *params;
        struct cfg80211_assoc_request req = {};
        int err;
@@ -234,7 +234,6 @@ void cfg80211_conn_work(struct work_struct *work)
                                        NULL, 0, NULL, 0,
                                        WLAN_STATUS_UNSPECIFIED_FAILURE,
                                        false, NULL);
-                       cfg80211_sme_free(wdev);
                }
                wdev_unlock(wdev);
        }
@@ -245,7 +244,7 @@ void cfg80211_conn_work(struct work_struct *work)
 /* Returned bss is reference counted and must be cleaned up appropriately. */
 static struct cfg80211_bss *cfg80211_get_conn_bss(struct wireless_dev *wdev)
 {
-       struct cfg80211_registered_device *rdev = wiphy_to_dev(wdev->wiphy);
+       struct cfg80211_registered_device *rdev = wiphy_to_rdev(wdev->wiphy);
        struct cfg80211_bss *bss;
        u16 capa = WLAN_CAPABILITY_ESS;
 
@@ -275,7 +274,7 @@ static struct cfg80211_bss *cfg80211_get_conn_bss(struct wireless_dev *wdev)
 static void __cfg80211_sme_scan_done(struct net_device *dev)
 {
        struct wireless_dev *wdev = dev->ieee80211_ptr;
-       struct cfg80211_registered_device *rdev = wiphy_to_dev(wdev->wiphy);
+       struct cfg80211_registered_device *rdev = wiphy_to_rdev(wdev->wiphy);
        struct cfg80211_bss *bss;
 
        ASSERT_WDEV_LOCK(wdev);
@@ -306,7 +305,7 @@ void cfg80211_sme_scan_done(struct net_device *dev)
 void cfg80211_sme_rx_auth(struct wireless_dev *wdev, const u8 *buf, size_t len)
 {
        struct wiphy *wiphy = wdev->wiphy;
-       struct cfg80211_registered_device *rdev = wiphy_to_dev(wiphy);
+       struct cfg80211_registered_device *rdev = wiphy_to_rdev(wiphy);
        struct ieee80211_mgmt *mgmt = (struct ieee80211_mgmt *)buf;
        u16 status_code = le16_to_cpu(mgmt->u.auth.status_code);
 
@@ -352,7 +351,7 @@ void cfg80211_sme_rx_auth(struct wireless_dev *wdev, const u8 *buf, size_t len)
 
 bool cfg80211_sme_rx_assoc_resp(struct wireless_dev *wdev, u16 status)
 {
-       struct cfg80211_registered_device *rdev = wiphy_to_dev(wdev->wiphy);
+       struct cfg80211_registered_device *rdev = wiphy_to_rdev(wdev->wiphy);
 
        if (!wdev->conn)
                return false;
@@ -386,7 +385,7 @@ void cfg80211_sme_deauth(struct wireless_dev *wdev)
 
 void cfg80211_sme_auth_timeout(struct wireless_dev *wdev)
 {
-       struct cfg80211_registered_device *rdev = wiphy_to_dev(wdev->wiphy);
+       struct cfg80211_registered_device *rdev = wiphy_to_rdev(wdev->wiphy);
 
        if (!wdev->conn)
                return;
@@ -397,7 +396,7 @@ void cfg80211_sme_auth_timeout(struct wireless_dev *wdev)
 
 void cfg80211_sme_disassoc(struct wireless_dev *wdev)
 {
-       struct cfg80211_registered_device *rdev = wiphy_to_dev(wdev->wiphy);
+       struct cfg80211_registered_device *rdev = wiphy_to_rdev(wdev->wiphy);
 
        if (!wdev->conn)
                return;
@@ -408,7 +407,7 @@ void cfg80211_sme_disassoc(struct wireless_dev *wdev)
 
 void cfg80211_sme_assoc_timeout(struct wireless_dev *wdev)
 {
-       struct cfg80211_registered_device *rdev = wiphy_to_dev(wdev->wiphy);
+       struct cfg80211_registered_device *rdev = wiphy_to_rdev(wdev->wiphy);
 
        if (!wdev->conn)
                return;
@@ -421,7 +420,7 @@ static int cfg80211_sme_connect(struct wireless_dev *wdev,
                                struct cfg80211_connect_params *connect,
                                const u8 *prev_bssid)
 {
-       struct cfg80211_registered_device *rdev = wiphy_to_dev(wdev->wiphy);
+       struct cfg80211_registered_device *rdev = wiphy_to_rdev(wdev->wiphy);
        struct cfg80211_bss *bss;
        int err;
 
@@ -468,7 +467,7 @@ static int cfg80211_sme_connect(struct wireless_dev *wdev,
        }
 
        wdev->conn->params.ssid = wdev->ssid;
-       wdev->conn->params.ssid_len = connect->ssid_len;
+       wdev->conn->params.ssid_len = wdev->ssid_len;
 
        /* see if we have the bss already */
        bss = cfg80211_get_conn_bss(wdev);
@@ -480,7 +479,6 @@ static int cfg80211_sme_connect(struct wireless_dev *wdev,
 
        /* we're good if we have a matching bss struct */
        if (bss) {
-               wdev->conn->state = CFG80211_CONN_AUTHENTICATE_NEXT;
                err = cfg80211_conn_do_work(wdev);
                cfg80211_put_bss(wdev->wiphy, bss);
        } else {
@@ -506,7 +504,7 @@ static int cfg80211_sme_connect(struct wireless_dev *wdev,
 
 static int cfg80211_sme_disconnect(struct wireless_dev *wdev, u16 reason)
 {
-       struct cfg80211_registered_device *rdev = wiphy_to_dev(wdev->wiphy);
+       struct cfg80211_registered_device *rdev = wiphy_to_rdev(wdev->wiphy);
        int err;
 
        if (!wdev->conn)
@@ -594,7 +592,7 @@ void __cfg80211_connect_result(struct net_device *dev, const u8 *bssid,
                return;
        }
 
-       nl80211_send_connect_result(wiphy_to_dev(wdev->wiphy), dev,
+       nl80211_send_connect_result(wiphy_to_rdev(wdev->wiphy), dev,
                                    bssid, req_ie, req_ie_len,
                                    resp_ie, resp_ie_len,
                                    status, GFP_KERNEL);
@@ -625,7 +623,7 @@ void __cfg80211_connect_result(struct net_device *dev, const u8 *bssid,
 #endif
 
        if (!bss && (status == WLAN_STATUS_SUCCESS)) {
-               WARN_ON_ONCE(!wiphy_to_dev(wdev->wiphy)->ops->connect);
+               WARN_ON_ONCE(!wiphy_to_rdev(wdev->wiphy)->ops->connect);
                bss = cfg80211_get_bss(wdev->wiphy, NULL, bssid,
                                       wdev->ssid, wdev->ssid_len,
                                       WLAN_CAPABILITY_ESS,
@@ -648,6 +646,7 @@ void __cfg80211_connect_result(struct net_device *dev, const u8 *bssid,
                        cfg80211_unhold_bss(bss_from_pub(bss));
                        cfg80211_put_bss(wdev->wiphy, bss);
                }
+               cfg80211_sme_free(wdev);
                return;
        }
 
@@ -687,7 +686,7 @@ void cfg80211_connect_result(struct net_device *dev, const u8 *bssid,
                             u16 status, gfp_t gfp)
 {
        struct wireless_dev *wdev = dev->ieee80211_ptr;
-       struct cfg80211_registered_device *rdev = wiphy_to_dev(wdev->wiphy);
+       struct cfg80211_registered_device *rdev = wiphy_to_rdev(wdev->wiphy);
        struct cfg80211_event *ev;
        unsigned long flags;
 
@@ -742,7 +741,8 @@ void __cfg80211_roamed(struct wireless_dev *wdev,
        cfg80211_hold_bss(bss_from_pub(bss));
        wdev->current_bss = bss_from_pub(bss);
 
-       nl80211_send_roamed(wiphy_to_dev(wdev->wiphy), wdev->netdev, bss->bssid,
+       nl80211_send_roamed(wiphy_to_rdev(wdev->wiphy),
+                           wdev->netdev, bss->bssid,
                            req_ie, req_ie_len, resp_ie, resp_ie_len,
                            GFP_KERNEL);
 
@@ -801,7 +801,7 @@ void cfg80211_roamed_bss(struct net_device *dev,
                         size_t resp_ie_len, gfp_t gfp)
 {
        struct wireless_dev *wdev = dev->ieee80211_ptr;
-       struct cfg80211_registered_device *rdev = wiphy_to_dev(wdev->wiphy);
+       struct cfg80211_registered_device *rdev = wiphy_to_rdev(wdev->wiphy);
        struct cfg80211_event *ev;
        unsigned long flags;
 
@@ -834,7 +834,7 @@ void __cfg80211_disconnected(struct net_device *dev, const u8 *ie,
                             size_t ie_len, u16 reason, bool from_ap)
 {
        struct wireless_dev *wdev = dev->ieee80211_ptr;
-       struct cfg80211_registered_device *rdev = wiphy_to_dev(wdev->wiphy);
+       struct cfg80211_registered_device *rdev = wiphy_to_rdev(wdev->wiphy);
        int i;
 #ifdef CONFIG_CFG80211_WEXT
        union iwreq_data wrqu;
@@ -880,7 +880,7 @@ void cfg80211_disconnected(struct net_device *dev, u16 reason,
                           u8 *ie, size_t ie_len, gfp_t gfp)
 {
        struct wireless_dev *wdev = dev->ieee80211_ptr;
-       struct cfg80211_registered_device *rdev = wiphy_to_dev(wdev->wiphy);
+       struct cfg80211_registered_device *rdev = wiphy_to_rdev(wdev->wiphy);
        struct cfg80211_event *ev;
        unsigned long flags;
 
index aabccf13e07b6860ef92ddc637a7879a8f961aab..f3c13ff4d04c8742c028126970938233da10b1d2 100644 (file)
@@ -1919,6 +1919,24 @@ TRACE_EVENT(rdev_set_qos_map,
                  WIPHY_PR_ARG, NETDEV_PR_ARG, __entry->num_des)
 );
 
+TRACE_EVENT(rdev_set_ap_chanwidth,
+       TP_PROTO(struct wiphy *wiphy, struct net_device *netdev,
+                struct cfg80211_chan_def *chandef),
+       TP_ARGS(wiphy, netdev, chandef),
+       TP_STRUCT__entry(
+               WIPHY_ENTRY
+               NETDEV_ENTRY
+               CHAN_DEF_ENTRY
+       ),
+       TP_fast_assign(
+               WIPHY_ASSIGN;
+               NETDEV_ASSIGN;
+               CHAN_DEF_ASSIGN(chandef);
+       ),
+       TP_printk(WIPHY_PR_FMT ", " NETDEV_PR_FMT ", " CHAN_DEF_PR_FMT,
+                 WIPHY_PR_ARG, NETDEV_PR_ARG, CHAN_DEF_PR_ARG)
+);
+
 /*************************************************************
  *          cfg80211 exported functions traces              *
  *************************************************************/
@@ -2193,18 +2211,21 @@ TRACE_EVENT(cfg80211_cqm_rssi_notify,
 );
 
 TRACE_EVENT(cfg80211_reg_can_beacon,
-       TP_PROTO(struct wiphy *wiphy, struct cfg80211_chan_def *chandef),
-       TP_ARGS(wiphy, chandef),
+       TP_PROTO(struct wiphy *wiphy, struct cfg80211_chan_def *chandef,
+                enum nl80211_iftype iftype),
+       TP_ARGS(wiphy, chandef, iftype),
        TP_STRUCT__entry(
                WIPHY_ENTRY
                CHAN_DEF_ENTRY
+               __field(enum nl80211_iftype, iftype)
        ),
        TP_fast_assign(
                WIPHY_ASSIGN;
                CHAN_DEF_ASSIGN(chandef);
+               __entry->iftype = iftype;
        ),
-       TP_printk(WIPHY_PR_FMT ", " CHAN_DEF_PR_FMT,
-                 WIPHY_PR_ARG, CHAN_DEF_PR_ARG)
+       TP_printk(WIPHY_PR_FMT ", " CHAN_DEF_PR_FMT ", iftype=%d",
+                 WIPHY_PR_ARG, CHAN_DEF_PR_ARG, __entry->iftype)
 );
 
 TRACE_EVENT(cfg80211_chandef_dfs_required,
index e5872ff2c27ca8989ca6da7cfdf4d7041c29a72e..7c47fa07b276f90ad870cb9789aa8c5b8fb63521 100644 (file)
@@ -770,7 +770,7 @@ EXPORT_SYMBOL(ieee80211_bss_get_ie);
 
 void cfg80211_upload_connect_keys(struct wireless_dev *wdev)
 {
-       struct cfg80211_registered_device *rdev = wiphy_to_dev(wdev->wiphy);
+       struct cfg80211_registered_device *rdev = wiphy_to_rdev(wdev->wiphy);
        struct net_device *dev = wdev->netdev;
        int i;
 
@@ -888,11 +888,6 @@ int cfg80211_change_iface(struct cfg80211_registered_device *rdev,
                return -EBUSY;
 
        if (ntype != otype && netif_running(dev)) {
-               err = cfg80211_can_change_interface(rdev, dev->ieee80211_ptr,
-                                                   ntype);
-               if (err)
-                       return err;
-
                dev->ieee80211_ptr->use_4addr = false;
                dev->ieee80211_ptr->mesh_id_up_len = 0;
                wdev_lock(dev->ieee80211_ptr);
@@ -1268,6 +1263,106 @@ int cfg80211_validate_beacon_int(struct cfg80211_registered_device *rdev,
        return res;
 }
 
+int cfg80211_iter_combinations(struct wiphy *wiphy,
+                              const int num_different_channels,
+                              const u8 radar_detect,
+                              const int iftype_num[NUM_NL80211_IFTYPES],
+                              void (*iter)(const struct ieee80211_iface_combination *c,
+                                           void *data),
+                              void *data)
+{
+       int i, j, iftype;
+       int num_interfaces = 0;
+       u32 used_iftypes = 0;
+
+       for (iftype = 0; iftype < NUM_NL80211_IFTYPES; iftype++) {
+               num_interfaces += iftype_num[iftype];
+               if (iftype_num[iftype] > 0 &&
+                   !(wiphy->software_iftypes & BIT(iftype)))
+                       used_iftypes |= BIT(iftype);
+       }
+
+       for (i = 0; i < wiphy->n_iface_combinations; i++) {
+               const struct ieee80211_iface_combination *c;
+               struct ieee80211_iface_limit *limits;
+               u32 all_iftypes = 0;
+
+               c = &wiphy->iface_combinations[i];
+
+               if (num_interfaces > c->max_interfaces)
+                       continue;
+               if (num_different_channels > c->num_different_channels)
+                       continue;
+
+               limits = kmemdup(c->limits, sizeof(limits[0]) * c->n_limits,
+                                GFP_KERNEL);
+               if (!limits)
+                       return -ENOMEM;
+
+               for (iftype = 0; iftype < NUM_NL80211_IFTYPES; iftype++) {
+                       if (wiphy->software_iftypes & BIT(iftype))
+                               continue;
+                       for (j = 0; j < c->n_limits; j++) {
+                               all_iftypes |= limits[j].types;
+                               if (!(limits[j].types & BIT(iftype)))
+                                       continue;
+                               if (limits[j].max < iftype_num[iftype])
+                                       goto cont;
+                               limits[j].max -= iftype_num[iftype];
+                       }
+               }
+
+               if (radar_detect != (c->radar_detect_widths & radar_detect))
+                       goto cont;
+
+               /* Finally check that all iftypes that we're currently
+                * using are actually part of this combination. If they
+                * aren't then we can't use this combination and have
+                * to continue to the next.
+                */
+               if ((all_iftypes & used_iftypes) != used_iftypes)
+                       goto cont;
+
+               /* This combination covered all interface types and
+                * supported the requested numbers, so we're good.
+                */
+
+               (*iter)(c, data);
+ cont:
+               kfree(limits);
+       }
+
+       return 0;
+}
+EXPORT_SYMBOL(cfg80211_iter_combinations);
+
+static void
+cfg80211_iter_sum_ifcombs(const struct ieee80211_iface_combination *c,
+                         void *data)
+{
+       int *num = data;
+       (*num)++;
+}
+
+int cfg80211_check_combinations(struct wiphy *wiphy,
+                               const int num_different_channels,
+                               const u8 radar_detect,
+                               const int iftype_num[NUM_NL80211_IFTYPES])
+{
+       int err, num = 0;
+
+       err = cfg80211_iter_combinations(wiphy, num_different_channels,
+                                        radar_detect, iftype_num,
+                                        cfg80211_iter_sum_ifcombs, &num);
+       if (err)
+               return err;
+       if (num == 0)
+               return -EBUSY;
+
+       return 0;
+}
+EXPORT_SYMBOL(cfg80211_check_combinations);
+
 int cfg80211_can_use_iftype_chan(struct cfg80211_registered_device *rdev,
                                 struct wireless_dev *wdev,
                                 enum nl80211_iftype iftype,
@@ -1276,7 +1371,6 @@ int cfg80211_can_use_iftype_chan(struct cfg80211_registered_device *rdev,
                                 u8 radar_detect)
 {
        struct wireless_dev *wdev_iter;
-       u32 used_iftypes = BIT(iftype);
        int num[NUM_NL80211_IFTYPES];
        struct ieee80211_channel
                        *used_channels[CFG80211_MAX_NUM_DIFFERENT_CHANNELS];
@@ -1284,7 +1378,7 @@ int cfg80211_can_use_iftype_chan(struct cfg80211_registered_device *rdev,
        enum cfg80211_chan_mode chmode;
        int num_different_channels = 0;
        int total = 1;
-       int i, j;
+       int i;
 
        ASSERT_RTNL();
 
@@ -1306,6 +1400,11 @@ int cfg80211_can_use_iftype_chan(struct cfg80211_registered_device *rdev,
 
        num[iftype] = 1;
 
+       /* TODO: We'll probably not need this anymore, since this
+        * should only be called with CHAN_MODE_UNDEFINED. There are
+        * still a couple of pending calls where other chanmodes are
+        * used, but we should get rid of them.
+        */
        switch (chanmode) {
        case CHAN_MODE_UNDEFINED:
                break;
@@ -1369,65 +1468,13 @@ int cfg80211_can_use_iftype_chan(struct cfg80211_registered_device *rdev,
 
                num[wdev_iter->iftype]++;
                total++;
-               used_iftypes |= BIT(wdev_iter->iftype);
        }
 
        if (total == 1 && !radar_detect)
                return 0;
 
-       for (i = 0; i < rdev->wiphy.n_iface_combinations; i++) {
-               const struct ieee80211_iface_combination *c;
-               struct ieee80211_iface_limit *limits;
-               u32 all_iftypes = 0;
-
-               c = &rdev->wiphy.iface_combinations[i];
-
-               if (total > c->max_interfaces)
-                       continue;
-               if (num_different_channels > c->num_different_channels)
-                       continue;
-
-               limits = kmemdup(c->limits, sizeof(limits[0]) * c->n_limits,
-                                GFP_KERNEL);
-               if (!limits)
-                       return -ENOMEM;
-
-               for (iftype = 0; iftype < NUM_NL80211_IFTYPES; iftype++) {
-                       if (rdev->wiphy.software_iftypes & BIT(iftype))
-                               continue;
-                       for (j = 0; j < c->n_limits; j++) {
-                               all_iftypes |= limits[j].types;
-                               if (!(limits[j].types & BIT(iftype)))
-                                       continue;
-                               if (limits[j].max < num[iftype])
-                                       goto cont;
-                               limits[j].max -= num[iftype];
-                       }
-               }
-
-               if (radar_detect && !(c->radar_detect_widths & radar_detect))
-                       goto cont;
-
-               /*
-                * Finally check that all iftypes that we're currently
-                * using are actually part of this combination. If they
-                * aren't then we can't use this combination and have
-                * to continue to the next.
-                */
-               if ((all_iftypes & used_iftypes) != used_iftypes)
-                       goto cont;
-
-               /*
-                * This combination covered all interface types and
-                * supported the requested numbers, so we're good.
-                */
-               kfree(limits);
-               return 0;
- cont:
-               kfree(limits);
-       }
-
-       return -EBUSY;
+       return cfg80211_check_combinations(&rdev->wiphy, num_different_channels,
+                                          radar_detect, num);
 }
 
 int ieee80211_get_ratemask(struct ieee80211_supported_band *sband,
index 5661a54ac7ee4ed1c1865d855e1b2681c67d07cc..11120bb14162505043579628bed2ad131ba41f7d 100644 (file)
@@ -73,7 +73,7 @@ int cfg80211_wext_siwmode(struct net_device *dev, struct iw_request_info *info,
        struct vif_params vifparams;
        enum nl80211_iftype type;
 
-       rdev = wiphy_to_dev(wdev->wiphy);
+       rdev = wiphy_to_rdev(wdev->wiphy);
 
        switch (*mode) {
        case IW_MODE_INFRA:
@@ -253,12 +253,12 @@ EXPORT_SYMBOL_GPL(cfg80211_wext_giwrange);
 
 /**
  * cfg80211_wext_freq - get wext frequency for non-"auto"
- * @wiphy: the wiphy
+ * @dev: the net device
  * @freq: the wext freq encoding
  *
  * Returns a frequency, or a negative error code, or 0 for auto.
  */
-int cfg80211_wext_freq(struct wiphy *wiphy, struct iw_freq *freq)
+int cfg80211_wext_freq(struct iw_freq *freq)
 {
        /*
         * Parse frequency - return 0 for auto and
@@ -286,7 +286,7 @@ int cfg80211_wext_siwrts(struct net_device *dev,
                         struct iw_param *rts, char *extra)
 {
        struct wireless_dev *wdev = dev->ieee80211_ptr;
-       struct cfg80211_registered_device *rdev = wiphy_to_dev(wdev->wiphy);
+       struct cfg80211_registered_device *rdev = wiphy_to_rdev(wdev->wiphy);
        u32 orts = wdev->wiphy->rts_threshold;
        int err;
 
@@ -324,7 +324,7 @@ int cfg80211_wext_siwfrag(struct net_device *dev,
                          struct iw_param *frag, char *extra)
 {
        struct wireless_dev *wdev = dev->ieee80211_ptr;
-       struct cfg80211_registered_device *rdev = wiphy_to_dev(wdev->wiphy);
+       struct cfg80211_registered_device *rdev = wiphy_to_rdev(wdev->wiphy);
        u32 ofrag = wdev->wiphy->frag_threshold;
        int err;
 
@@ -364,7 +364,7 @@ static int cfg80211_wext_siwretry(struct net_device *dev,
                                  struct iw_param *retry, char *extra)
 {
        struct wireless_dev *wdev = dev->ieee80211_ptr;
-       struct cfg80211_registered_device *rdev = wiphy_to_dev(wdev->wiphy);
+       struct cfg80211_registered_device *rdev = wiphy_to_rdev(wdev->wiphy);
        u32 changed = 0;
        u8 olong = wdev->wiphy->retry_long;
        u8 oshort = wdev->wiphy->retry_short;
@@ -587,7 +587,7 @@ static int cfg80211_wext_siwencode(struct net_device *dev,
                                   struct iw_point *erq, char *keybuf)
 {
        struct wireless_dev *wdev = dev->ieee80211_ptr;
-       struct cfg80211_registered_device *rdev = wiphy_to_dev(wdev->wiphy);
+       struct cfg80211_registered_device *rdev = wiphy_to_rdev(wdev->wiphy);
        int idx, err;
        bool remove = false;
        struct key_params params;
@@ -647,7 +647,7 @@ static int cfg80211_wext_siwencodeext(struct net_device *dev,
                                      struct iw_point *erq, char *extra)
 {
        struct wireless_dev *wdev = dev->ieee80211_ptr;
-       struct cfg80211_registered_device *rdev = wiphy_to_dev(wdev->wiphy);
+       struct cfg80211_registered_device *rdev = wiphy_to_rdev(wdev->wiphy);
        struct iw_encode_ext *ext = (struct iw_encode_ext *) extra;
        const u8 *addr;
        int idx;
@@ -775,7 +775,7 @@ static int cfg80211_wext_siwfreq(struct net_device *dev,
                                 struct iw_freq *wextfreq, char *extra)
 {
        struct wireless_dev *wdev = dev->ieee80211_ptr;
-       struct cfg80211_registered_device *rdev = wiphy_to_dev(wdev->wiphy);
+       struct cfg80211_registered_device *rdev = wiphy_to_rdev(wdev->wiphy);
        struct cfg80211_chan_def chandef = {
                .width = NL80211_CHAN_WIDTH_20_NOHT,
        };
@@ -787,7 +787,7 @@ static int cfg80211_wext_siwfreq(struct net_device *dev,
        case NL80211_IFTYPE_ADHOC:
                return cfg80211_ibss_wext_siwfreq(dev, info, wextfreq, extra);
        case NL80211_IFTYPE_MONITOR:
-               freq = cfg80211_wext_freq(wdev->wiphy, wextfreq);
+               freq = cfg80211_wext_freq(wextfreq);
                if (freq < 0)
                        return freq;
                if (freq == 0)
@@ -798,7 +798,7 @@ static int cfg80211_wext_siwfreq(struct net_device *dev,
                        return -EINVAL;
                return cfg80211_set_monitor_channel(rdev, &chandef);
        case NL80211_IFTYPE_MESH_POINT:
-               freq = cfg80211_wext_freq(wdev->wiphy, wextfreq);
+               freq = cfg80211_wext_freq(wextfreq);
                if (freq < 0)
                        return freq;
                if (freq == 0)
@@ -818,7 +818,7 @@ static int cfg80211_wext_giwfreq(struct net_device *dev,
                                 struct iw_freq *freq, char *extra)
 {
        struct wireless_dev *wdev = dev->ieee80211_ptr;
-       struct cfg80211_registered_device *rdev = wiphy_to_dev(wdev->wiphy);
+       struct cfg80211_registered_device *rdev = wiphy_to_rdev(wdev->wiphy);
        struct cfg80211_chan_def chandef;
        int ret;
 
@@ -847,7 +847,7 @@ static int cfg80211_wext_siwtxpower(struct net_device *dev,
                                    union iwreq_data *data, char *extra)
 {
        struct wireless_dev *wdev = dev->ieee80211_ptr;
-       struct cfg80211_registered_device *rdev = wiphy_to_dev(wdev->wiphy);
+       struct cfg80211_registered_device *rdev = wiphy_to_rdev(wdev->wiphy);
        enum nl80211_tx_power_setting type;
        int dbm = 0;
 
@@ -899,7 +899,7 @@ static int cfg80211_wext_giwtxpower(struct net_device *dev,
                                    union iwreq_data *data, char *extra)
 {
        struct wireless_dev *wdev = dev->ieee80211_ptr;
-       struct cfg80211_registered_device *rdev = wiphy_to_dev(wdev->wiphy);
+       struct cfg80211_registered_device *rdev = wiphy_to_rdev(wdev->wiphy);
        int err, val;
 
        if ((data->txpower.flags & IW_TXPOW_TYPE) != IW_TXPOW_DBM)
@@ -1119,7 +1119,7 @@ static int cfg80211_wext_siwpower(struct net_device *dev,
                                  struct iw_param *wrq, char *extra)
 {
        struct wireless_dev *wdev = dev->ieee80211_ptr;
-       struct cfg80211_registered_device *rdev = wiphy_to_dev(wdev->wiphy);
+       struct cfg80211_registered_device *rdev = wiphy_to_rdev(wdev->wiphy);
        bool ps = wdev->ps;
        int timeout = wdev->ps_timeout;
        int err;
@@ -1177,7 +1177,7 @@ static int cfg80211_wds_wext_siwap(struct net_device *dev,
                                   struct sockaddr *addr, char *extra)
 {
        struct wireless_dev *wdev = dev->ieee80211_ptr;
-       struct cfg80211_registered_device *rdev = wiphy_to_dev(wdev->wiphy);
+       struct cfg80211_registered_device *rdev = wiphy_to_rdev(wdev->wiphy);
        int err;
 
        if (WARN_ON(wdev->iftype != NL80211_IFTYPE_WDS))
@@ -1221,7 +1221,7 @@ static int cfg80211_wext_siwrate(struct net_device *dev,
                                 struct iw_param *rate, char *extra)
 {
        struct wireless_dev *wdev = dev->ieee80211_ptr;
-       struct cfg80211_registered_device *rdev = wiphy_to_dev(wdev->wiphy);
+       struct cfg80211_registered_device *rdev = wiphy_to_rdev(wdev->wiphy);
        struct cfg80211_bitrate_mask mask;
        u32 fixed, maxrate;
        struct ieee80211_supported_band *sband;
@@ -1272,7 +1272,7 @@ static int cfg80211_wext_giwrate(struct net_device *dev,
                                 struct iw_param *rate, char *extra)
 {
        struct wireless_dev *wdev = dev->ieee80211_ptr;
-       struct cfg80211_registered_device *rdev = wiphy_to_dev(wdev->wiphy);
+       struct cfg80211_registered_device *rdev = wiphy_to_rdev(wdev->wiphy);
        /* we are under RTNL - globally locked - so can use a static struct */
        static struct station_info sinfo;
        u8 addr[ETH_ALEN];
@@ -1310,7 +1310,7 @@ static int cfg80211_wext_giwrate(struct net_device *dev,
 static struct iw_statistics *cfg80211_wireless_stats(struct net_device *dev)
 {
        struct wireless_dev *wdev = dev->ieee80211_ptr;
-       struct cfg80211_registered_device *rdev = wiphy_to_dev(wdev->wiphy);
+       struct cfg80211_registered_device *rdev = wiphy_to_rdev(wdev->wiphy);
        /* we are under RTNL - globally locked - so can use static structs */
        static struct iw_statistics wstats;
        static struct station_info sinfo;
@@ -1449,7 +1449,7 @@ static int cfg80211_wext_siwpmksa(struct net_device *dev,
                                  struct iw_point *data, char *extra)
 {
        struct wireless_dev *wdev = dev->ieee80211_ptr;
-       struct cfg80211_registered_device *rdev = wiphy_to_dev(wdev->wiphy);
+       struct cfg80211_registered_device *rdev = wiphy_to_rdev(wdev->wiphy);
        struct cfg80211_pmksa cfg_pmksa;
        struct iw_pmksa *pmksa = (struct iw_pmksa *)extra;
 
index 5d766b0118e81969ff4f24c59b88bffdaa6496ff..ebcacca2f731941123efb22c8067a6c34aaa9ea1 100644 (file)
@@ -50,7 +50,7 @@ int cfg80211_wext_siwgenie(struct net_device *dev,
                           struct iw_point *data, char *extra);
 
 
-int cfg80211_wext_freq(struct wiphy *wiphy, struct iw_freq *freq);
+int cfg80211_wext_freq(struct iw_freq *freq);
 
 
 extern const struct iw_handler_def cfg80211_wext_handler;
index 86c331a65664a77bfe6c083224b7eae2c91f5b9c..c7e5c8eb4f24708a27d90ae298a85c825ee81e38 100644 (file)
@@ -67,7 +67,7 @@ int cfg80211_mgd_wext_siwfreq(struct net_device *dev,
                              struct iw_freq *wextfreq, char *extra)
 {
        struct wireless_dev *wdev = dev->ieee80211_ptr;
-       struct cfg80211_registered_device *rdev = wiphy_to_dev(wdev->wiphy);
+       struct cfg80211_registered_device *rdev = wiphy_to_rdev(wdev->wiphy);
        struct ieee80211_channel *chan = NULL;
        int err, freq;
 
@@ -75,7 +75,7 @@ int cfg80211_mgd_wext_siwfreq(struct net_device *dev,
        if (WARN_ON(wdev->iftype != NL80211_IFTYPE_STATION))
                return -EINVAL;
 
-       freq = cfg80211_wext_freq(wdev->wiphy, wextfreq);
+       freq = cfg80211_wext_freq(wextfreq);
        if (freq < 0)
                return freq;
 
@@ -169,7 +169,7 @@ int cfg80211_mgd_wext_siwessid(struct net_device *dev,
                               struct iw_point *data, char *ssid)
 {
        struct wireless_dev *wdev = dev->ieee80211_ptr;
-       struct cfg80211_registered_device *rdev = wiphy_to_dev(wdev->wiphy);
+       struct cfg80211_registered_device *rdev = wiphy_to_rdev(wdev->wiphy);
        size_t len = data->length;
        int err;
 
@@ -260,7 +260,7 @@ int cfg80211_mgd_wext_siwap(struct net_device *dev,
                            struct sockaddr *ap_addr, char *extra)
 {
        struct wireless_dev *wdev = dev->ieee80211_ptr;
-       struct cfg80211_registered_device *rdev = wiphy_to_dev(wdev->wiphy);
+       struct cfg80211_registered_device *rdev = wiphy_to_rdev(wdev->wiphy);
        u8 *bssid = ap_addr->sa_data;
        int err;
 
@@ -333,7 +333,7 @@ int cfg80211_wext_siwgenie(struct net_device *dev,
                           struct iw_point *data, char *extra)
 {
        struct wireless_dev *wdev = dev->ieee80211_ptr;
-       struct cfg80211_registered_device *rdev = wiphy_to_dev(wdev->wiphy);
+       struct cfg80211_registered_device *rdev = wiphy_to_rdev(wdev->wiphy);
        u8 *ie = extra;
        int ie_len = data->length, err;
 
@@ -390,7 +390,7 @@ int cfg80211_wext_siwmlme(struct net_device *dev,
        if (!wdev)
                return -EOPNOTSUPP;
 
-       rdev = wiphy_to_dev(wdev->wiphy);
+       rdev = wiphy_to_rdev(wdev->wiphy);
 
        if (wdev->iftype != NL80211_IFTYPE_STATION)
                return -EINVAL;
index 375267d15c8f2ad61b8bce3be7af8443a0788f08..a8ef5108e0d86cbc5c411f3db378fde5a0d54f18 100644 (file)
@@ -2773,21 +2773,19 @@ static struct notifier_block xfrm_dev_notifier = {
 static int __net_init xfrm_statistics_init(struct net *net)
 {
        int rv;
-
-       if (snmp_mib_init((void __percpu **)net->mib.xfrm_statistics,
-                         sizeof(struct linux_xfrm_mib),
-                         __alignof__(struct linux_xfrm_mib)) < 0)
+       net->mib.xfrm_statistics = alloc_percpu(struct linux_xfrm_mib);
+       if (!net->mib.xfrm_statistics)
                return -ENOMEM;
        rv = xfrm_proc_init(net);
        if (rv < 0)
-               snmp_mib_free((void __percpu **)net->mib.xfrm_statistics);
+               free_percpu(net->mib.xfrm_statistics);
        return rv;
 }
 
 static void xfrm_statistics_fini(struct net *net)
 {
        xfrm_proc_fini(net);
-       snmp_mib_free((void __percpu **)net->mib.xfrm_statistics);
+       free_percpu(net->mib.xfrm_statistics);
 }
 #else
 static int __net_init xfrm_statistics_init(struct net *net)
index fc5abd0b456f3a3abf8163821f22577b8d700f6f..9c4fbd8935f48e28c3c86d9da443904f55f93ec3 100644 (file)
@@ -54,8 +54,7 @@ static int xfrm_statistics_seq_show(struct seq_file *seq, void *v)
        int i;
        for (i = 0; xfrm_mib_list[i].name; i++)
                seq_printf(seq, "%-24s\t%lu\n", xfrm_mib_list[i].name,
-                          snmp_fold_field((void __percpu **)
-                                          net->mib.xfrm_statistics,
+                          snmp_fold_field(net->mib.xfrm_statistics,
                                           xfrm_mib_list[i].entry));
        return 0;
 }
index 3d4b4c4640910386ab17817e69cbf4cafe2511ac..fd9a16a6d1de36faad11b3a8a4676cac61969f37 100644 (file)
@@ -2340,7 +2340,7 @@ static int xfrm_user_rcv_msg(struct sk_buff *skb, struct nlmsghdr *nlh)
        link = &xfrm_dispatch[type];
 
        /* All operations require privileges, even GET */
-       if (!ns_capable(net->user_ns, CAP_NET_ADMIN))
+       if (!netlink_net_capable(skb, CAP_NET_ADMIN))
                return -EPERM;
 
        if ((type == (XFRM_MSG_GETSA - XFRM_MSG_BASE) ||
index cc49062acdeecf85259f646df09abe22f1019a5d..1052d4834a44f502bda4f4f1ebe1202ace1608f5 100644 (file)
 #define EM_ARCOMPACT   93
 #endif
 
+#ifndef EM_XTENSA
+#define EM_XTENSA      94
+#endif
+
 #ifndef EM_AARCH64
 #define EM_AARCH64     183
 #endif
@@ -281,6 +285,7 @@ do_file(char const *const fname)
        case EM_AARCH64:
        case EM_MICROBLAZE:
        case EM_MIPS:
+       case EM_XTENSA:
                break;
        }  /* end switch */
 
index b4beb77967b17949daf46623a822cb7e960e8829..2c7341dbc5d68d1948ad0efa713ad3a85307608e 100644 (file)
@@ -3317,9 +3317,9 @@ static int selinux_file_fcntl(struct file *file, unsigned int cmd,
        case F_GETLK:
        case F_SETLK:
        case F_SETLKW:
-       case F_GETLKP:
-       case F_SETLKP:
-       case F_SETLKPW:
+       case F_OFD_GETLK:
+       case F_OFD_SETLK:
+       case F_OFD_SETLKW:
 #if BITS_PER_LONG == 32
        case F_GETLK64:
        case F_SETLK64:
index 14d04e63b1f0e09ef15b4cf64057bd7cc861ed1d..be491a74c1edc4a279f76121dab615305c3a47f2 100644 (file)
@@ -147,7 +147,7 @@ struct security_class_mapping secclass_map[] = {
        { "peer", { "recv", NULL } },
        { "capability2",
          { "mac_override", "mac_admin", "syslog", "wake_alarm", "block_suspend",
-           NULL } },
+           "audit_read", NULL } },
        { "kernel_service", { "use_as_override", "create_files_as", NULL } },
        { "tun_socket",
          { COMMON_SOCK_PERMS, "attach_queue", NULL } },
index 248b90abb8825a62e9530a0629cbf432898898d3..480bbddbd801bf002e4cc43fb8c7c0f762ec40c8 100644 (file)
@@ -1059,24 +1059,26 @@ static void azx_init_cmd_io(struct azx *chip)
 
        /* reset the corb hw read pointer */
        azx_writew(chip, CORBRP, ICH6_CORBRP_RST);
-       for (timeout = 1000; timeout > 0; timeout--) {
-               if ((azx_readw(chip, CORBRP) & ICH6_CORBRP_RST) == ICH6_CORBRP_RST)
-                       break;
-               udelay(1);
-       }
-       if (timeout <= 0)
-               dev_err(chip->card->dev, "CORB reset timeout#1, CORBRP = %d\n",
-                       azx_readw(chip, CORBRP));
+       if (!(chip->driver_caps & AZX_DCAPS_CORBRP_SELF_CLEAR)) {
+               for (timeout = 1000; timeout > 0; timeout--) {
+                       if ((azx_readw(chip, CORBRP) & ICH6_CORBRP_RST) == ICH6_CORBRP_RST)
+                               break;
+                       udelay(1);
+               }
+               if (timeout <= 0)
+                       dev_err(chip->card->dev, "CORB reset timeout#1, CORBRP = %d\n",
+                               azx_readw(chip, CORBRP));
 
-       azx_writew(chip, CORBRP, 0);
-       for (timeout = 1000; timeout > 0; timeout--) {
-               if (azx_readw(chip, CORBRP) == 0)
-                       break;
-               udelay(1);
+               azx_writew(chip, CORBRP, 0);
+               for (timeout = 1000; timeout > 0; timeout--) {
+                       if (azx_readw(chip, CORBRP) == 0)
+                               break;
+                       udelay(1);
+               }
+               if (timeout <= 0)
+                       dev_err(chip->card->dev, "CORB reset timeout#2, CORBRP = %d\n",
+                               azx_readw(chip, CORBRP));
        }
-       if (timeout <= 0)
-               dev_err(chip->card->dev, "CORB reset timeout#2, CORBRP = %d\n",
-                       azx_readw(chip, CORBRP));
 
        /* enable corb dma */
        azx_writeb(chip, CORBCTL, ICH6_CORBCTL_RUN);
index d6bca62ef387b92b499dcf5954d5c783543055d1..b540ad71eb0d733ab217550a66ac40eb35e22da6 100644 (file)
@@ -249,7 +249,8 @@ enum {
 /* quirks for Nvidia */
 #define AZX_DCAPS_PRESET_NVIDIA \
        (AZX_DCAPS_NVIDIA_SNOOP | AZX_DCAPS_RIRB_DELAY | AZX_DCAPS_NO_MSI |\
-        AZX_DCAPS_ALIGN_BUFSIZE | AZX_DCAPS_NO_64BIT)
+        AZX_DCAPS_ALIGN_BUFSIZE | AZX_DCAPS_NO_64BIT |\
+        AZX_DCAPS_CORBRP_SELF_CLEAR)
 
 #define AZX_DCAPS_PRESET_CTHDA \
        (AZX_DCAPS_NO_MSI | AZX_DCAPS_POSFIX_LPIB | AZX_DCAPS_4K_BDLE_BOUNDARY)
index ba38b819f9847de7522de9171c1a7794aecc7a8f..4a7cb01fa91226b2cfd3a4a582d02d9899ffa6e0 100644 (file)
@@ -189,6 +189,7 @@ enum { SDI0, SDI1, SDI2, SDI3, SDO0, SDO1, SDO2, SDO3 };
 #define AZX_DCAPS_COUNT_LPIB_DELAY  (1 << 25)  /* Take LPIB as delay */
 #define AZX_DCAPS_PM_RUNTIME   (1 << 26)       /* runtime PM support */
 #define AZX_DCAPS_I915_POWERWELL (1 << 27)     /* HSW i915 powerwell support */
+#define AZX_DCAPS_CORBRP_SELF_CLEAR (1 << 28)  /* CORBRP clears itself after reset */
 
 /* position fix mode */
 enum {
index c643dfc0a82612c5a2672c5211e6e1d46c5102f3..c1952c9103398953ba0e05078f2f1a21763c8e90 100644 (file)
@@ -4621,6 +4621,7 @@ static const struct snd_pci_quirk alc269_fixup_tbl[] = {
        SND_PCI_QUIRK(0x1028, 0x0667, "Dell", ALC269_FIXUP_DELL1_MIC_NO_PRESENCE),
        SND_PCI_QUIRK(0x1028, 0x0668, "Dell", ALC255_FIXUP_DELL2_MIC_NO_PRESENCE),
        SND_PCI_QUIRK(0x1028, 0x0669, "Dell", ALC255_FIXUP_DELL2_MIC_NO_PRESENCE),
+       SND_PCI_QUIRK(0x1028, 0x0674, "Dell", ALC255_FIXUP_DELL1_MIC_NO_PRESENCE),
        SND_PCI_QUIRK(0x1028, 0x067f, "Dell", ALC255_FIXUP_DELL1_MIC_NO_PRESENCE),
        SND_PCI_QUIRK(0x1028, 0x15cc, "Dell X5 Precision", ALC269_FIXUP_DELL2_MIC_NO_PRESENCE),
        SND_PCI_QUIRK(0x1028, 0x15cd, "Dell X5 Precision", ALC269_FIXUP_DELL2_MIC_NO_PRESENCE),
index f500905e9373510d2bcfdb583b0569cec5994708..2acf82f4a08a8bed4db26c6c444a0eabec688412 100644 (file)
@@ -1018,13 +1018,13 @@ static int alc5623_i2c_probe(struct i2c_client *client,
                dev_err(&client->dev, "failed to read vendor ID1: %d\n", ret);
                return ret;
        }
-       vid1 = ((vid1 & 0xff) << 8) | (vid1 >> 8);
 
        ret = regmap_read(alc5623->regmap, ALC5623_VENDOR_ID2, &vid2);
        if (ret < 0) {
                dev_err(&client->dev, "failed to read vendor ID2: %d\n", ret);
                return ret;
        }
+       vid2 >>= 8;
 
        if ((vid1 != 0x10ec) || (vid2 != id->driver_data)) {
                dev_err(&client->dev, "unknown or wrong codec\n");
index 460d35547a683d226521591333ce06fe1c5de634..2213a037c893107bcfa584701d58e0541bd59e13 100644 (file)
@@ -1229,8 +1229,10 @@ static int cs42l52_i2c_probe(struct i2c_client *i2c_client,
        }
 
        if (cs42l52->pdata.reset_gpio) {
-               ret = gpio_request_one(cs42l52->pdata.reset_gpio,
-                                      GPIOF_OUT_INIT_HIGH, "CS42L52 /RST");
+               ret = devm_gpio_request_one(&i2c_client->dev,
+                                           cs42l52->pdata.reset_gpio,
+                                           GPIOF_OUT_INIT_HIGH,
+                                           "CS42L52 /RST");
                if (ret < 0) {
                        dev_err(&i2c_client->dev, "Failed to request /RST %d: %d\n",
                                cs42l52->pdata.reset_gpio, ret);
index 0ee60a19a26334dcae0484244fcf9d374965fc88..ae3717992d568fb2ba533634a25e306a9e8fd05b 100644 (file)
@@ -1443,8 +1443,10 @@ static int cs42l73_i2c_probe(struct i2c_client *i2c_client,
        i2c_set_clientdata(i2c_client, cs42l73);
 
        if (cs42l73->pdata.reset_gpio) {
-               ret = gpio_request_one(cs42l73->pdata.reset_gpio,
-                                      GPIOF_OUT_INIT_HIGH, "CS42L73 /RST");
+               ret = devm_gpio_request_one(&i2c_client->dev,
+                                           cs42l73->pdata.reset_gpio,
+                                           GPIOF_OUT_INIT_HIGH,
+                                           "CS42L73 /RST");
                if (ret < 0) {
                        dev_err(&i2c_client->dev, "Failed to request /RST %d: %d\n",
                                cs42l73->pdata.reset_gpio, ret);
index b1835103e9b4002ab44429d40bb16da8372f65aa..d7349bc89ad3085430b57eb0b67a14ea8b3886e6 100644 (file)
@@ -1399,7 +1399,6 @@ static int aic3x_probe(struct snd_soc_codec *codec)
        }
 
        aic3x_add_widgets(codec);
-       list_add(&aic3x->list, &reset_list);
 
        return 0;
 
@@ -1569,7 +1568,13 @@ static int aic3x_i2c_probe(struct i2c_client *i2c,
 
        ret = snd_soc_register_codec(&i2c->dev,
                        &soc_codec_dev_aic3x, &aic3x_dai, 1);
-       return ret;
+
+       if (ret != 0)
+               goto err_gpio;
+
+       list_add(&aic3x->list, &reset_list);
+
+       return 0;
 
 err_gpio:
        if (gpio_is_valid(aic3x->gpio_reset) &&
index b1266790d1174a74497e81d1848436300894b5ea..605a10b2112b3808e8d5a95e282e51daf69eec62 100644 (file)
@@ -144,8 +144,8 @@ enum spdif_gainsel {
 
 /* SPDIF Clock register */
 #define STC_SYSCLK_DIV_OFFSET          11
-#define STC_SYSCLK_DIV_MASK            (0x1ff << STC_TXCLK_SRC_OFFSET)
-#define STC_SYSCLK_DIV(x)              ((((x) - 1) << STC_TXCLK_DIV_OFFSET) & STC_SYSCLK_DIV_MASK)
+#define STC_SYSCLK_DIV_MASK            (0x1ff << STC_SYSCLK_DIV_OFFSET)
+#define STC_SYSCLK_DIV(x)              ((((x) - 1) << STC_SYSCLK_DIV_OFFSET) & STC_SYSCLK_DIV_MASK)
 #define STC_TXCLK_SRC_OFFSET           8
 #define STC_TXCLK_SRC_MASK             (0x7 << STC_TXCLK_SRC_OFFSET)
 #define STC_TXCLK_SRC_SET(x)           ((x << STC_TXCLK_SRC_OFFSET) & STC_TXCLK_SRC_MASK)
index fe8e81aad6461faf545b179f6d1a912369872e6a..30ca14a6a83595d5acf4e644d7f8d50827a99a98 100644 (file)
@@ -136,7 +136,7 @@ struct sst_module_data {
        enum sst_data_type data_type;   /* type of module data */
 
        u32 size;               /* size in bytes */
-       u32 offset;             /* offset in FW file */
+       int32_t offset;         /* offset in FW file */
        u32 data_offset;        /* offset in ADSP memory space */
        void *data;             /* module data */
 };
index f46bb4ddde6fc7550573e5fdd0235afda429044a..50e4246d4b57a2df8c9ad0c54deabb4e346ca572 100644 (file)
@@ -617,7 +617,7 @@ static void hsw_notification_work(struct work_struct *work)
        case IPC_POSITION_CHANGED:
                trace_ipc_notification("DSP stream position changed for",
                        stream->reply.stream_hw_id);
-               sst_dsp_inbox_read(hsw->dsp, pos, sizeof(pos));
+               sst_dsp_inbox_read(hsw->dsp, pos, sizeof(*pos));
 
                if (stream->notify_position)
                        stream->notify_position(stream, stream->pdata);
@@ -991,7 +991,8 @@ int sst_hsw_stream_get_volume(struct sst_hsw *hsw, struct sst_hsw_stream *stream
                return -EINVAL;
 
        sst_dsp_read(hsw->dsp, volume,
-               stream->reply.volume_register_address[channel], sizeof(volume));
+               stream->reply.volume_register_address[channel],
+               sizeof(*volume));
 
        return 0;
 }
@@ -1609,7 +1610,7 @@ int sst_hsw_dx_set_state(struct sst_hsw *hsw,
        trace_ipc_request("PM enter Dx state", state);
 
        ret = ipc_tx_message_wait(hsw, header, &state_, sizeof(state_),
-               dx, sizeof(dx));
+               dx, sizeof(*dx));
        if (ret < 0) {
                dev_err(hsw->dev, "ipc: error set dx state %d failed\n", state);
                return ret;
index be873c1b0c204f4f902bfc7e2edad8c10d623e15..d32c540555c41b6c3f9b8f5817a5baa09b5ca47b 100644 (file)
@@ -1,10 +1,8 @@
 #
 # Jz4740 Platform Support
 #
-snd-soc-jz4740-objs := jz4740-pcm.o
 snd-soc-jz4740-i2s-objs := jz4740-i2s.o
 
-obj-$(CONFIG_SND_JZ4740_SOC) += snd-soc-jz4740.o
 obj-$(CONFIG_SND_JZ4740_SOC_I2S) += snd-soc-jz4740-i2s.o
 
 # Jz4740 Machine Support
index 6232b7d307aab2c553bad3c7b6a19f7f69ac997e..4d0720ed5a906d86315971a869028011c3c3a7af 100644 (file)
@@ -258,7 +258,7 @@ static int rsnd_src_init(struct rsnd_mod *mod,
 {
        struct rsnd_src *src = rsnd_mod_to_src(mod);
 
-       clk_enable(src->clk);
+       clk_prepare_enable(src->clk);
 
        return 0;
 }
@@ -269,7 +269,7 @@ static int rsnd_src_quit(struct rsnd_mod *mod,
 {
        struct rsnd_src *src = rsnd_mod_to_src(mod);
 
-       clk_disable(src->clk);
+       clk_disable_unprepare(src->clk);
 
        return 0;
 }
index 4b7e20603dd7be8032198291ee08ed9b95de88dd..1d8387c25bd85f5b312db49e42109fdfd064815f 100644 (file)
@@ -171,7 +171,7 @@ static void rsnd_ssi_hw_start(struct rsnd_ssi *ssi,
        u32 cr;
 
        if (0 == ssi->usrcnt) {
-               clk_enable(ssi->clk);
+               clk_prepare_enable(ssi->clk);
 
                if (rsnd_dai_is_clk_master(rdai)) {
                        if (rsnd_ssi_clk_from_parent(ssi))
@@ -230,7 +230,7 @@ static void rsnd_ssi_hw_stop(struct rsnd_ssi *ssi,
                                rsnd_ssi_master_clk_stop(ssi);
                }
 
-               clk_disable(ssi->clk);
+               clk_disable_unprepare(ssi->clk);
        }
 
        dev_dbg(dev, "ssi%d hw stopped\n", rsnd_mod_id(&ssi->mod));
index c8a780d0d057f43b08e74fc7c6d57f414e8aecde..7769b0a2bc5a5287f932d20524216f7c390fd4c7 100644 (file)
@@ -254,7 +254,6 @@ static int dapm_kcontrol_data_alloc(struct snd_soc_dapm_widget *widget,
 static void dapm_kcontrol_free(struct snd_kcontrol *kctl)
 {
        struct dapm_kcontrol_data *data = snd_kcontrol_chip(kctl);
-       kfree(data->widget);
        kfree(data->wlist);
        kfree(data);
 }
index 7c43479623537af4f0d4179f4cce195cbea3e9b1..a74fba6d774353d33fac7f04b71abdd241e0218e 100644 (file)
@@ -12,8 +12,8 @@
 char debugfs_mountpoint[PATH_MAX + 1] = "/sys/kernel/debug";
 
 static const char * const debugfs_known_mountpoints[] = {
-       "/sys/kernel/debug/",
-       "/debug/",
+       "/sys/kernel/debug",
+       "/debug",
        0,
 };
 
index baec7d887da4fafeeacbda82697ecff6200c95da..b83184f2d484f59f3a888648fd3f548c0dc37d12 100644 (file)
@@ -4344,6 +4344,7 @@ static void pretty_print(struct trace_seq *s, void *data, int size, struct event
                                              format, len_arg, arg);
                                trace_seq_terminate(&p);
                                trace_seq_puts(s, p.buffer);
+                               trace_seq_destroy(&p);
                                arg = arg->next;
                                break;
                        default:
index 791c539374c726b7e4d3ad8f6f75aad575692787..feab942816343aba5023d1b6ab588322e7d45506 100644 (file)
@@ -876,8 +876,8 @@ struct event_filter {
 struct event_filter *pevent_filter_alloc(struct pevent *pevent);
 
 /* for backward compatibility */
-#define FILTER_NONE            PEVENT_ERRNO__FILTER_NOT_FOUND
-#define FILTER_NOEXIST         PEVENT_ERRNO__NO_FILTER
+#define FILTER_NONE            PEVENT_ERRNO__NO_FILTER
+#define FILTER_NOEXIST         PEVENT_ERRNO__FILTER_NOT_FOUND
 #define FILTER_MISS            PEVENT_ERRNO__FILTER_MISS
 #define FILTER_MATCH           PEVENT_ERRNO__FILTER_MATCH
 
index bb31813e43ddca8bd2bad4544e593d8c6df418f2..9a287bec695a3630bc43f19e3376d8cff36d2856 100644 (file)
@@ -820,7 +820,7 @@ do_div:
                r->A &= r->X;
                break;
        case BPF_ALU_AND | BPF_K:
-               r->A &= r->X;
+               r->A &= K;
                break;
        case BPF_ALU_OR | BPF_X:
                r->A |= r->X;
index bf7be77ddd621238aa2a26ca2103b309bdcd089f..833a96611da6d042e089294990801817d0f1ade9 100644 (file)
@@ -92,6 +92,7 @@ extern void yyerror(const char *str);
 "#"?("cpu")    { return K_CPU; }
 "#"?("vlan_tci") { return K_VLANT; }
 "#"?("vlan_pr")        { return K_VLANP; }
+"#"?("rand")   { return K_RAND; }
 
 ":"            { return ':'; }
 ","            { return ','; }
index d15efc989ef500ac3f9998cf660d8e2bd22627e7..e6306c51c26f9e7cb53342ad088a9db1e4e32423 100644 (file)
@@ -56,7 +56,7 @@ static void bpf_set_jmp_label(char *label, enum jmp_type type);
 %token OP_LDXI
 
 %token K_PKT_LEN K_PROTO K_TYPE K_NLATTR K_NLATTR_NEST K_MARK K_QUEUE K_HATYPE
-%token K_RXHASH K_CPU K_IFIDX K_VLANT K_VLANP K_POFF
+%token K_RXHASH K_CPU K_IFIDX K_VLANT K_VLANP K_POFF K_RAND
 
 %token ':' ',' '[' ']' '(' ')' 'x' 'a' '+' 'M' '*' '&' '#' '%'
 
@@ -164,6 +164,9 @@ ldb
        | OP_LDB K_POFF {
                bpf_set_curr_instr(BPF_LD | BPF_B | BPF_ABS, 0, 0,
                                   SKF_AD_OFF + SKF_AD_PAY_OFFSET); }
+       | OP_LDB K_RAND {
+               bpf_set_curr_instr(BPF_LD | BPF_B | BPF_ABS, 0, 0,
+                                  SKF_AD_OFF + SKF_AD_RANDOM); }
        ;
 
 ldh
@@ -212,6 +215,9 @@ ldh
        | OP_LDH K_POFF {
                bpf_set_curr_instr(BPF_LD | BPF_H | BPF_ABS, 0, 0,
                                   SKF_AD_OFF + SKF_AD_PAY_OFFSET); }
+       | OP_LDH K_RAND {
+               bpf_set_curr_instr(BPF_LD | BPF_H | BPF_ABS, 0, 0,
+                                  SKF_AD_OFF + SKF_AD_RANDOM); }
        ;
 
 ldi
@@ -265,6 +271,9 @@ ld
        | OP_LD K_POFF {
                bpf_set_curr_instr(BPF_LD | BPF_W | BPF_ABS, 0, 0,
                                   SKF_AD_OFF + SKF_AD_PAY_OFFSET); }
+       | OP_LD K_RAND {
+               bpf_set_curr_instr(BPF_LD | BPF_W | BPF_ABS, 0, 0,
+                                  SKF_AD_OFF + SKF_AD_RANDOM); }
        | OP_LD 'M' '[' number ']' {
                bpf_set_curr_instr(BPF_LD | BPF_MEM, 0, 0, $4); }
        | OP_LD '[' 'x' '+' number ']' {
index cfe0cdcda3de990793b70d128e2267f53baac5ba..c5baf9c591b7bb5a2c280e173f4e0a2b561285fa 100644 (file)
@@ -43,8 +43,7 @@ static void get_exec_path(char *tpath, size_t size)
        free(path);
 }
 
-static void get_asm_insns(uint8_t *image, size_t len, unsigned long base,
-                         int opcodes)
+static void get_asm_insns(uint8_t *image, size_t len, int opcodes)
 {
        int count, i, pc = 0;
        char tpath[256];
@@ -107,13 +106,13 @@ static void put_klog_buff(char *buff)
 }
 
 static int get_last_jit_image(char *haystack, size_t hlen,
-                             uint8_t *image, size_t ilen,
-                             unsigned long *base)
+                             uint8_t *image, size_t ilen)
 {
        char *ptr, *pptr, *tmp;
        off_t off = 0;
        int ret, flen, proglen, pass, ulen = 0;
        regmatch_t pmatch[1];
+       unsigned long base;
        regex_t regex;
 
        if (hlen == 0)
@@ -136,7 +135,7 @@ static int get_last_jit_image(char *haystack, size_t hlen,
 
        ptr = haystack + off - (pmatch[0].rm_eo - pmatch[0].rm_so);
        ret = sscanf(ptr, "flen=%d proglen=%d pass=%d image=%lx",
-                    &flen, &proglen, &pass, base);
+                    &flen, &proglen, &pass, &base);
        if (ret != 4)
                return 0;
 
@@ -162,7 +161,7 @@ static int get_last_jit_image(char *haystack, size_t hlen,
        assert(ulen == proglen);
        printf("%d bytes emitted from JIT compiler (pass:%d, flen:%d)\n",
               proglen, pass, flen);
-       printf("%lx + <x>:\n", *base);
+       printf("%lx + <x>:\n", base);
 
        regfree(&regex);
        return ulen;
@@ -172,8 +171,7 @@ int main(int argc, char **argv)
 {
        int len, klen, opcodes = 0;
        char *kbuff;
-       unsigned long base;
-       uint8_t image[4096];
+       static uint8_t image[32768];
 
        if (argc > 1) {
                if (!strncmp("-o", argv[argc - 1], 2)) {
@@ -189,9 +187,9 @@ int main(int argc, char **argv)
 
        kbuff = get_klog_buff(&klen);
 
-       len = get_last_jit_image(kbuff, klen, image, sizeof(image), &base);
-       if (len > 0 && base > 0)
-               get_asm_insns(image, len, base, opcodes);
+       len = get_last_jit_image(kbuff, klen, image, sizeof(image));
+       if (len > 0)
+               get_asm_insns(image, len, opcodes);
 
        put_klog_buff(kbuff);
 
index e96923310d5780e2fe62e45736b2511f43aa907d..895edd32930ce7283cbda2df8914ba44462fc7ef 100644 (file)
@@ -589,7 +589,7 @@ $(GTK_OBJS): $(OUTPUT)%.o: %.c $(LIB_H)
        $(QUIET_CC)$(CC) -o $@ -c -fPIC $(CFLAGS) $(GTK_CFLAGS) $<
 
 $(OUTPUT)libperf-gtk.so: $(GTK_OBJS) $(PERFLIBS)
-       $(QUIET_LINK)$(CC) -o $@ -shared $(ALL_LDFLAGS) $(filter %.o,$^) $(GTK_LIBS)
+       $(QUIET_LINK)$(CC) -o $@ -shared $(LDFLAGS) $(filter %.o,$^) $(GTK_LIBS)
 
 $(OUTPUT)builtin-help.o: builtin-help.c $(OUTPUT)common-cmds.h $(OUTPUT)PERF-CFLAGS
        $(QUIET_CC)$(CC) -o $@ -c $(CFLAGS) \
index b602ad93ce630ae3f611ee74f0bb6277c367a09b..83bc2385e6d3c2820958b279f77c4d08342b1e06 100644 (file)
@@ -23,9 +23,10 @@ static int sample_ustack(struct perf_sample *sample,
 
        sp = (unsigned long) regs[PERF_REG_X86_SP];
 
-       map = map_groups__find(&thread->mg, MAP__FUNCTION, (u64) sp);
+       map = map_groups__find(&thread->mg, MAP__VARIABLE, (u64) sp);
        if (!map) {
                pr_debug("failed to get stack map\n");
+               free(buf);
                return -1;
        }
 
index 99167bf644eaa8b376060b6b6d42977e9cd09425..60875d5c556c217d3433487d65cf66382a78d25b 100644 (file)
@@ -1,4 +1,3 @@
-
 #include <linux/linkage.h>
 
 #define AX      0
@@ -90,3 +89,10 @@ ENTRY(perf_regs_load)
        ret
 ENDPROC(perf_regs_load)
 #endif
+
+/*
+ * We need to provide note.GNU-stack section, saying that we want
+ * NOT executable stack. Otherwise the final linking will assume that
+ * the ELF stack should not be restricted at all and set it RWX.
+ */
+.section .note.GNU-stack,"",@progbits
index 21c164b8f9db2a70a9467e1341fbb6a49a3536a1..0f1e5a2f6ad71651ad1be15fb98a6174ed53646a 100644 (file)
@@ -404,6 +404,7 @@ static struct kvm_event *kvm_alloc_init_event(struct event_key *key)
        }
 
        event->key = *key;
+       init_stats(&event->total.stats);
        return event;
 }
 
index eb524f91bffe5d9098d582e07050734a1c3e5343..8ce62ef7f6c387e3e23a18c51b9ec1cf82e2a189 100644 (file)
@@ -374,7 +374,7 @@ static int __cmd_record(struct record *rec, int argc, const char **argv)
 
        session = perf_session__new(file, false, NULL);
        if (session == NULL) {
-               pr_err("Not enough memory for reading perf file header\n");
+               pr_err("Perf session creation failed.\n");
                return -1;
        }
 
index ee21fa95ebcf60c2067b32f3e9b4575974f930ad..802cf544202b7b06720afc9abb1c9580cf362892 100644 (file)
@@ -34,6 +34,14 @@ ifeq ($(ARCH),arm)
   LIBUNWIND_LIBS = -lunwind -lunwind-arm
 endif
 
+# So far there's only x86 libdw unwind support merged in perf.
+# Disable it on all other architectures in case libdw unwind
+# support is detected in system. Add supported architectures
+# to the check.
+ifneq ($(ARCH),x86)
+  NO_LIBDW_DWARF_UNWIND := 1
+endif
+
 ifeq ($(LIBUNWIND_LIBS),)
   NO_LIBUNWIND := 1
 else
@@ -109,6 +117,10 @@ CFLAGS += -Wall
 CFLAGS += -Wextra
 CFLAGS += -std=gnu99
 
+# Enforce a non-executable stack, as we may regress (again) in the future by
+# adding assembler files missing the .GNU-stack linker note.
+LDFLAGS += -Wl,-z,noexecstack
+
 EXTLIBS = -lelf -lpthread -lrt -lm -ldl
 
 ifneq ($(OUTPUT),)
@@ -186,7 +198,10 @@ VF_FEATURE_TESTS =                 \
        stackprotector-all              \
        timerfd                         \
        libunwind-debug-frame           \
-       bionic
+       bionic                          \
+       liberty                         \
+       liberty-z                       \
+       cplus-demangle
 
 # Set FEATURE_CHECK_(C|LD)FLAGS-all for all CORE_FEATURE_TESTS features.
 # If in the future we need per-feature checks/flags for features not
@@ -504,7 +519,21 @@ else
 endif
 
 ifeq ($(feature-libbfd), 1)
-  EXTLIBS += -lbfd -lz -liberty
+  EXTLIBS += -lbfd
+
+  # call all detections now so we get correct
+  # status in VF output
+  $(call feature_check,liberty)
+  $(call feature_check,liberty-z)
+  $(call feature_check,cplus-demangle)
+
+  ifeq ($(feature-liberty), 1)
+    EXTLIBS += -liberty
+  else
+    ifeq ($(feature-liberty-z), 1)
+      EXTLIBS += -liberty -lz
+    endif
+  endif
 endif
 
 ifdef NO_DEMANGLE
@@ -515,15 +544,10 @@ else
     CFLAGS += -DHAVE_CPLUS_DEMANGLE_SUPPORT
   else
     ifneq ($(feature-libbfd), 1)
-      $(call feature_check,liberty)
-      ifeq ($(feature-liberty), 1)
-        EXTLIBS += -lbfd -liberty
-      else
-        $(call feature_check,liberty-z)
-        ifeq ($(feature-liberty-z), 1)
-          EXTLIBS += -lbfd -liberty -lz
-        else
-          $(call feature_check,cplus-demangle)
+      ifneq ($(feature-liberty), 1)
+        ifneq ($(feature-liberty-z), 1)
+          # we dont have neither HAVE_CPLUS_DEMANGLE_SUPPORT
+          # or any of 'bfd iberty z' trinity
           ifeq ($(feature-cplus-demangle), 1)
             EXTLIBS += -liberty
             CFLAGS += -DHAVE_CPLUS_DEMANGLE_SUPPORT
index 5daeae1cb4c01b3a87f4c54ee4018199c12aeeb6..2f92d6e7ee007bea58636fe8509757c4f626d77d 100644 (file)
@@ -46,6 +46,7 @@ make_install_man    := install-man
 make_install_html   := install-html
 make_install_info   := install-info
 make_install_pdf    := install-pdf
+make_static         := LDFLAGS=-static
 
 # all the NO_* variable combined
 make_minimal        := NO_LIBPERL=1 NO_LIBPYTHON=1 NO_NEWT=1 NO_GTK2=1
@@ -87,6 +88,7 @@ run += make_install_bin
 # run += make_install_info
 # run += make_install_pdf
 run += make_minimal
+run += make_static
 
 ifneq ($(call has,ctags),)
 run += make_tags
index 1fbcd8bdc11b8b387c4f9f2f1a57ae6807e9e92c..55de44ecebefb5aa74365eb0b04294cfb186585a 100644 (file)
@@ -86,10 +86,17 @@ static int open_file_read(struct perf_data_file *file)
 
 static int open_file_write(struct perf_data_file *file)
 {
+       int fd;
+
        if (check_backup(file))
                return -1;
 
-       return open(file->path, O_CREAT|O_RDWR|O_TRUNC, S_IRUSR|S_IWUSR);
+       fd = open(file->path, O_CREAT|O_RDWR|O_TRUNC, S_IRUSR|S_IWUSR);
+
+       if (fd < 0)
+               pr_err("failed to open %s : %s\n", file->path, strerror(errno));
+
+       return fd;
 }
 
 static int open_file(struct perf_data_file *file)
index a53cd0b8c151cdb898d3711c36e5081846813a15..27c2a5efe4504945bf9c8492b62b8256abc0be33 100644 (file)
@@ -717,7 +717,7 @@ static char *get_kernel_version(const char *root_dir)
 }
 
 static int map_groups__set_modules_path_dir(struct map_groups *mg,
-                               const char *dir_name)
+                               const char *dir_name, int depth)
 {
        struct dirent *dent;
        DIR *dir = opendir(dir_name);
@@ -742,7 +742,15 @@ static int map_groups__set_modules_path_dir(struct map_groups *mg,
                            !strcmp(dent->d_name, ".."))
                                continue;
 
-                       ret = map_groups__set_modules_path_dir(mg, path);
+                       /* Do not follow top-level source and build symlinks */
+                       if (depth == 0) {
+                               if (!strcmp(dent->d_name, "source") ||
+                                   !strcmp(dent->d_name, "build"))
+                                       continue;
+                       }
+
+                       ret = map_groups__set_modules_path_dir(mg, path,
+                                                              depth + 1);
                        if (ret < 0)
                                goto out;
                } else {
@@ -786,11 +794,11 @@ static int machine__set_modules_path(struct machine *machine)
        if (!version)
                return -1;
 
-       snprintf(modules_path, sizeof(modules_path), "%s/lib/modules/%s/kernel",
+       snprintf(modules_path, sizeof(modules_path), "%s/lib/modules/%s",
                 machine->root_dir, version);
        free(version);
 
-       return map_groups__set_modules_path_dir(&machine->kmaps, modules_path);
+       return map_groups__set_modules_path_dir(&machine->kmaps, modules_path, 0);
 }
 
 static int machine__create_module(void *arg, const char *name, u64 start)
index 3b7dbf51d4a93bd425fc41363d98a47acd7288ff..6864661a79dd03a6543d54320f470543304bdb1b 100644 (file)
@@ -6,6 +6,7 @@
 #include <inttypes.h>
 
 #include "symbol.h"
+#include "vdso.h"
 #include <symbol/kallsyms.h>
 #include "debug.h"
 
@@ -618,6 +619,7 @@ int symsrc__init(struct symsrc *ss, struct dso *dso, const char *name,
                GElf_Shdr shdr;
                ss->adjust_symbols = (ehdr.e_type == ET_EXEC ||
                                ehdr.e_type == ET_REL ||
+                               is_vdso_map(dso->short_name) ||
                                elf_section_by_name(elf, &ehdr, &shdr,
                                                     ".gnu.prelink_undo",
                                                     NULL) != NULL);
index d9186a2fdf0696f1a319ba6cac0377a66c1fe112..c2c0f20067a5028ebf304ca4758c830b80ffbe35 100644 (file)
@@ -89,15 +89,6 @@ else
        STRIPCMD = $(STRIP) -s --remove-section=.note --remove-section=.comment
 endif
 
-# if DEBUG is enabled, then we do not strip or optimize
-ifeq ($(strip $(DEBUG)),true)
-       CFLAGS += -O1 -g -DDEBUG
-       STRIPCMD = /bin/true -Since_we_are_debugging
-else
-       CFLAGS += $(OPTIMIZATION) -fomit-frame-pointer
-       STRIPCMD = $(STRIP) -s --remove-section=.note --remove-section=.comment
-endif
-
 # --- ACPIDUMP BEGIN ---
 
 vpath %.c \
@@ -128,7 +119,7 @@ clean:
        -rm -f $(OUTPUT)acpidump
 
 install-tools:
-       $(INSTALL) -d $(DESTDIR)${bindir}
+       $(INSTALL) -d $(DESTDIR)${sbindir}
        $(INSTALL_PROGRAM) $(OUTPUT)acpidump $(DESTDIR)${sbindir}
 
 install-man:
index 750512ba2c8846d2d1275abb796b425fdd4a987d..c7493b8f9b0ee5b951d73d64e7502e809be2d5ed 100644 (file)
@@ -14,6 +14,12 @@ all: $(NET_PROGS)
 run_tests: all
        @/bin/sh ./run_netsocktests || echo "sockettests: [FAIL]"
        @/bin/sh ./run_afpackettests || echo "afpackettests: [FAIL]"
-
+       @if /sbin/modprobe test_bpf ; then \
+               /sbin/rmmod test_bpf; \
+               echo "test_bpf: ok"; \
+       else \
+               echo "test_bpf: [FAIL]"; \
+               exit 1; \
+       fi
 clean:
        $(RM) $(NET_PROGS)
index 47b29834a6b61def09f6340013cc9b2927c03cd9..56ff9bebb577df935200aacfc1e8251ae0800bcc 100644 (file)
@@ -548,11 +548,10 @@ static bool handle_mmio_cfg_reg(struct kvm_vcpu *vcpu,
        u32 val;
        u32 *reg;
 
-       offset >>= 1;
        reg = vgic_bitmap_get_reg(&vcpu->kvm->arch.vgic.irq_cfg,
-                                 vcpu->vcpu_id, offset);
+                                 vcpu->vcpu_id, offset >> 1);
 
-       if (offset & 2)
+       if (offset & 4)
                val = *reg >> 16;
        else
                val = *reg & 0xffff;
@@ -561,13 +560,13 @@ static bool handle_mmio_cfg_reg(struct kvm_vcpu *vcpu,
        vgic_reg_access(mmio, &val, offset,
                        ACCESS_READ_VALUE | ACCESS_WRITE_VALUE);
        if (mmio->is_write) {
-               if (offset < 4) {
+               if (offset < 8) {
                        *reg = ~0U; /* Force PPIs/SGIs to 1 */
                        return false;
                }
 
                val = vgic_cfg_compress(val);
-               if (offset & 2) {
+               if (offset & 4) {
                        *reg &= 0xffff;
                        *reg |= val << 16;
                } else {
@@ -916,6 +915,7 @@ static void vgic_dispatch_sgi(struct kvm_vcpu *vcpu, u32 reg)
        case 0:
                if (!target_cpus)
                        return;
+               break;
 
        case 1:
                target_cpus = ((1 << nrcpus) - 1) & ~(1 << vcpu_id) & 0xff;
@@ -1667,10 +1667,11 @@ static int vgic_ioaddr_assign(struct kvm *kvm, phys_addr_t *ioaddr,
        if (addr + size < addr)
                return -EINVAL;
 
+       *ioaddr = addr;
        ret = vgic_ioaddr_overlap(kvm);
        if (ret)
-               return ret;
-       *ioaddr = addr;
+               *ioaddr = VGIC_ADDR_UNDEF;
+
        return ret;
 }
 
index 8db43701016f30cebeb37ab7e4d581166606b86f..bf06577fea51c22ab944edb9560e56f01aae2f94 100644 (file)
@@ -395,7 +395,8 @@ static int assigned_device_enable_host_msix(struct kvm *kvm,
        if (dev->entries_nr == 0)
                return r;
 
-       r = pci_enable_msix(dev->dev, dev->host_msix_entries, dev->entries_nr);
+       r = pci_enable_msix_exact(dev->dev,
+                                 dev->host_msix_entries, dev->entries_nr);
        if (r)
                return r;
 
index 10df100c4514e856d1ca5509f87f118e37726f57..06e6401d6ef45326edcbce4c8ff96e13286d2940 100644 (file)
@@ -101,7 +101,7 @@ static void async_pf_execute(struct work_struct *work)
        if (waitqueue_active(&vcpu->wq))
                wake_up_interruptible(&vcpu->wq);
 
-       mmdrop(mm);
+       mmput(mm);
        kvm_put_kvm(vcpu->kvm);
 }
 
@@ -118,7 +118,7 @@ void kvm_clear_async_pf_completion_queue(struct kvm_vcpu *vcpu)
                flush_work(&work->work);
 #else
                if (cancel_work_sync(&work->work)) {
-                       mmdrop(work->mm);
+                       mmput(work->mm);
                        kvm_put_kvm(vcpu->kvm); /* == work->vcpu->kvm */
                        kmem_cache_free(async_pf_cache, work);
                }
@@ -183,7 +183,7 @@ int kvm_setup_async_pf(struct kvm_vcpu *vcpu, gva_t gva, unsigned long hva,
        work->addr = hva;
        work->arch = *arch;
        work->mm = current->mm;
-       atomic_inc(&work->mm->mm_count);
+       atomic_inc(&work->mm->mm_users);
        kvm_get_kvm(work->vcpu->kvm);
 
        /* this can't really happen otherwise gfn_to_pfn_async
@@ -201,7 +201,7 @@ int kvm_setup_async_pf(struct kvm_vcpu *vcpu, gva_t gva, unsigned long hva,
        return 1;
 retry_sync:
        kvm_put_kvm(work->vcpu->kvm);
-       mmdrop(work->mm);
+       mmput(work->mm);
        kmem_cache_free(async_pf_cache, work);
        return 0;
 }
This page took 2.081258 seconds and 5 git commands to generate.