Merge branches 'acpi-pci', 'acpi-soc' and 'pnp'
author	Rafael J. Wysocki <rafael.j.wysocki@intel.com>	Mon, 14 Mar 2016 13:20:57 +0000 (14:20 +0100)
committer	Rafael J. Wysocki <rafael.j.wysocki@intel.com>	Mon, 14 Mar 2016 13:20:57 +0000 (14:20 +0100)
* acpi-pci:
  x86/ACPI/PCI: Recognize that Interrupt Line 255 means "not connected"

* acpi-soc:
  i2c: designware: Add device HID for future AMD I2C controller

* pnp:
  PNP / ACPI: add ACPI_RESOURCE_TYPE_SERIAL_BUS as a valid type

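For context on the acpi-pci change above: on x86, a PCI Interrupt Line register value of 255 conventionally means "unknown or not connected", so it must not be handed to drivers as a usable IRQ. A minimal stand-alone C sketch of that convention (hypothetical names, not the actual drivers/acpi/pci_irq.c code):

#include <stdbool.h>
#include <stdint.h>

/* 0xff in the PCI Interrupt Line register means "unknown / not connected" on x86. */
#define PCI_INTERRUPT_LINE_NOT_CONNECTED 0xffu   /* hypothetical constant name */

static bool pci_intx_usable(uint8_t interrupt_line)
{
        return interrupt_line != PCI_INTERRUPT_LINE_NOT_CONNECTED;
}
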
268 files changed:
Documentation/devicetree/bindings/arm/omap/omap.txt
Documentation/virtual/kvm/mmu.txt
MAINTAINERS
Makefile
arch/arm/boot/dts/armada-xp-axpwifiap.dts
arch/arm/boot/dts/armada-xp-db.dts
arch/arm/boot/dts/armada-xp-gp.dts
arch/arm/boot/dts/armada-xp-lenovo-ix4-300d.dts
arch/arm/boot/dts/armada-xp-linksys-mamba.dts
arch/arm/boot/dts/armada-xp-matrix.dts
arch/arm/boot/dts/armada-xp-netgear-rn2120.dts
arch/arm/boot/dts/armada-xp-openblocks-ax3-4.dts
arch/arm/boot/dts/armada-xp-synology-ds414.dts
arch/arm/boot/dts/dra7.dtsi
arch/arm/mach-omap2/omap_hwmod.c
arch/arm/mach-omap2/omap_hwmod.h
arch/arm64/include/asm/pgtable.h
arch/arm64/kernel/sleep.S
arch/arm64/mm/hugetlbpage.c
arch/mips/Kconfig
arch/mips/boot/compressed/uart-16550.c
arch/mips/kernel/smp.c
arch/powerpc/kvm/book3s_hv_rmhandlers.S
arch/s390/include/asm/mmu_context.h
arch/s390/include/asm/pgalloc.h
arch/s390/kernel/head64.S
arch/s390/kvm/kvm-s390.c
arch/x86/include/asm/fpu/xstate.h
arch/x86/kernel/fpu/core.c
arch/x86/kernel/fpu/init.c
arch/x86/kvm/mmu.c
arch/x86/kvm/vmx.c
arch/x86/lib/delay.c
arch/x86/platform/efi/quirks.c
drivers/acpi/Makefile
drivers/acpi/acpi_amba.c [new file with mode: 0644]
drivers/acpi/acpi_apd.c
drivers/acpi/acpi_platform.c
drivers/acpi/acpi_processor.c
drivers/acpi/acpica/acglobal.h
drivers/acpi/acpica/aclocal.h
drivers/acpi/acpica/acnamesp.h
drivers/acpi/acpica/acpredef.h
drivers/acpi/acpica/dbcmds.c
drivers/acpi/acpica/dbconvert.c
drivers/acpi/acpica/dsmethod.c
drivers/acpi/acpica/dsobject.c
drivers/acpi/acpica/evgpeblk.c
drivers/acpi/acpica/evgpeinit.c
drivers/acpi/acpica/evregion.c
drivers/acpi/acpica/exconfig.c
drivers/acpi/acpica/exoparg3.c
drivers/acpi/acpica/nseval.c
drivers/acpi/acpica/nsinit.c
drivers/acpi/acpica/psargs.c
drivers/acpi/acpica/tbinstal.c
drivers/acpi/acpica/tbprint.c
drivers/acpi/acpica/tbutils.c
drivers/acpi/acpica/tbxfload.c
drivers/acpi/acpica/utcache.c
drivers/acpi/acpica/utnonansi.c
drivers/acpi/acpica/uttrack.c
drivers/acpi/acpica/utxferror.c
drivers/acpi/acpica/utxfinit.c
drivers/acpi/apei/apei-base.c
drivers/acpi/apei/erst.c
drivers/acpi/bus.c
drivers/acpi/cppc_acpi.c
drivers/acpi/internal.h
drivers/acpi/osl.c
drivers/acpi/pci_irq.c
drivers/acpi/processor_driver.c
drivers/acpi/processor_idle.c
drivers/acpi/scan.c
drivers/acpi/sleep.c
drivers/acpi/tables.c
drivers/base/property.c
drivers/dma/at_xdmac.c
drivers/dma/fsldma.c
drivers/edac/sb_edac.c
drivers/gpu/drm/amd/amdgpu/amdgpu_display.c
drivers/gpu/drm/amd/amdgpu/atombios_dp.c
drivers/gpu/drm/i2c/tda998x_drv.c
drivers/gpu/drm/i915/intel_audio.c
drivers/gpu/drm/i915/intel_ddi.c
drivers/gpu/drm/i915/intel_dp.c
drivers/gpu/drm/i915/intel_hdmi.c
drivers/gpu/drm/i915/intel_i2c.c
drivers/gpu/drm/imx/ipuv3-crtc.c
drivers/gpu/drm/imx/ipuv3-plane.c
drivers/gpu/drm/radeon/atombios_dp.c
drivers/gpu/drm/radeon/radeon_device.c
drivers/gpu/drm/radeon/radeon_display.c
drivers/gpu/drm/radeon/radeon_pm.c
drivers/gpu/drm/vmwgfx/vmwgfx_scrn.c
drivers/gpu/ipu-v3/ipu-common.c
drivers/i2c/busses/i2c-designware-platdrv.c
drivers/mailbox/pcc.c
drivers/media/media-device.c
drivers/mtd/tests/mtd_nandecctest.c
drivers/net/can/spi/mcp251x.c
drivers/net/can/usb/gs_usb.c
drivers/net/ethernet/3com/3c59x.c
drivers/net/ethernet/altera/altera_tse_main.c
drivers/net/ethernet/aurora/nb8800.c
drivers/net/ethernet/broadcom/bnx2x/bnx2x_hsi.h
drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c
drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.c
drivers/net/ethernet/broadcom/bnx2x/bnx2x_vfpf.c
drivers/net/ethernet/broadcom/bnxt/bnxt.c
drivers/net/ethernet/brocade/bna/bna_tx_rx.c
drivers/net/ethernet/cavium/thunder/nic.h
drivers/net/ethernet/cavium/thunder/nic_main.c
drivers/net/ethernet/cavium/thunder/nic_reg.h
drivers/net/ethernet/emulex/benet/be.h
drivers/net/ethernet/emulex/benet/be_cmds.h
drivers/net/ethernet/emulex/benet/be_main.c
drivers/net/ethernet/ethoc.c
drivers/net/ethernet/freescale/fman/fman.c
drivers/net/ethernet/freescale/gianfar.c
drivers/net/ethernet/hisilicon/Kconfig
drivers/net/ethernet/hisilicon/hns/hns_ae_adapt.c
drivers/net/ethernet/hisilicon/hns/hns_dsaf_main.c
drivers/net/ethernet/hisilicon/hns/hns_dsaf_main.h
drivers/net/ethernet/hisilicon/hns/hns_dsaf_reg.h
drivers/net/ethernet/hisilicon/hns/hns_ethtool.c
drivers/net/ethernet/ibm/ibmveth.c
drivers/net/ethernet/ibm/ibmvnic.c
drivers/net/ethernet/ibm/ibmvnic.h
drivers/net/ethernet/jme.c
drivers/net/ethernet/mellanox/mlx4/en_netdev.c
drivers/net/ethernet/mellanox/mlx4/main.c
drivers/net/ethernet/mellanox/mlx4/port.c
drivers/net/ethernet/mellanox/mlx5/core/en.h
drivers/net/ethernet/mellanox/mlx5/core/en_clock.c
drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c
drivers/net/ethernet/mellanox/mlx5/core/en_main.c
drivers/net/ethernet/mellanox/mlx5/core/en_rx.c
drivers/net/ethernet/mellanox/mlx5/core/en_tx.c
drivers/net/ethernet/mellanox/mlx5/core/en_txrx.c
drivers/net/ethernet/mellanox/mlxsw/pci.c
drivers/net/ethernet/mellanox/mlxsw/spectrum.c
drivers/net/ethernet/moxa/moxart_ether.c
drivers/net/ethernet/qualcomm/qca_spi.c
drivers/net/ethernet/realtek/r8169.c
drivers/net/ethernet/renesas/ravb_main.c
drivers/net/ethernet/renesas/sh_eth.c
drivers/net/ethernet/stmicro/stmmac/stmmac_mdio.c
drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c
drivers/net/ethernet/synopsys/dwc_eth_qos.c
drivers/net/phy/micrel.c
drivers/net/ppp/ppp_generic.c
drivers/net/usb/ax88172a.c
drivers/net/usb/cdc_ncm.c
drivers/net/usb/qmi_wwan.c
drivers/net/usb/usbnet.c
drivers/net/vmxnet3/vmxnet3_drv.c
drivers/net/vrf.c
drivers/net/vxlan.c
drivers/net/wireless/intel/iwlwifi/mvm/fw.c
drivers/net/wireless/intel/iwlwifi/mvm/mvm.h
drivers/net/wireless/intel/iwlwifi/mvm/ops.c
drivers/net/wireless/intel/iwlwifi/mvm/tx.c
drivers/of/of_mdio.c
drivers/pci/pci.c
drivers/s390/block/dasd_diag.c
drivers/spi/spi-imx.c
drivers/spi/spi-rockchip.c
drivers/ssb/Kconfig
drivers/target/target_core_tmr.c
fs/dax.c
fs/ext4/move_extent.c
fs/jffs2/dir.c
fs/ncpfs/dir.c
fs/ocfs2/mmap.c
fs/overlayfs/dir.c
fs/overlayfs/inode.c
fs/overlayfs/super.c
fs/xfs/xfs_log_recover.c
include/acpi/acoutput.h
include/acpi/acpixf.h
include/acpi/processor.h
include/linux/bio.h
include/linux/dma-mapping.h
include/linux/interrupt.h
include/linux/kasan.h
include/linux/list.h
include/linux/mlx5/mlx5_ifc.h
include/linux/skbuff.h
include/linux/stmmac.h
include/linux/tracepoint.h
include/net/iw_handler.h
include/trace/events/asoc.h
include/uapi/linux/bpf.h
include/uapi/linux/media.h
kernel/irq/manage.c
kernel/memremap.c
kernel/sched/core.c
lib/list_debug.c
mm/filemap.c
mm/hugetlb.c
mm/kasan/kasan.c
mm/mempolicy.c
mm/mempool.c
net/bridge/br_fdb.c
net/core/filter.c
net/core/rtnetlink.c
net/core/skbuff.c
net/ipv4/igmp.c
net/ipv4/ip_output.c
net/ipv4/ip_tunnel.c
net/ipv4/tcp_metrics.c
net/ipv4/tcp_minisocks.c
net/ipv4/udp_tunnel.c
net/ipv6/exthdrs_core.c
net/ipv6/ip6_gre.c
net/ipv6/ip6_tunnel.c
net/ipv6/mcast.c
net/ipv6/udp.c
net/mac80211/agg-rx.c
net/mac80211/ieee80211_i.h
net/mac80211/rc80211_minstrel.c
net/mac80211/rc80211_minstrel_ht.c
net/mac80211/rx.c
net/sched/act_ipt.c
net/sctp/ipv6.c
net/sctp/proc.c
net/switchdev/switchdev.c
net/tipc/socket.c
net/tipc/subscr.c
net/wireless/core.c
net/wireless/nl80211.c
net/wireless/sme.c
net/wireless/wext-core.c
scripts/ld-version.sh
sound/soc/codecs/ab8500-codec.c
sound/soc/codecs/adau17x1.h
sound/soc/codecs/cs42l51.c
sound/soc/codecs/da732x.c
sound/soc/codecs/max98088.c
sound/soc/codecs/max98095.c
sound/soc/codecs/tlv320dac33.c
sound/soc/codecs/wl1273.c
sound/soc/codecs/wm8753.c
sound/soc/codecs/wm8904.c
sound/soc/codecs/wm8958-dsp2.c
sound/soc/codecs/wm8983.c
sound/soc/codecs/wm8985.c
sound/soc/codecs/wm8994.c
sound/soc/codecs/wm8996.c
sound/soc/codecs/wm9081.c
sound/soc/codecs/wm9713.c
sound/soc/codecs/wm_adsp.c
sound/soc/fsl/fsl_ssi.c
sound/soc/intel/boards/cht_bsw_rt5645.c
sound/soc/intel/boards/mfld_machine.c
sound/soc/intel/skylake/skl-topology.c
sound/soc/omap/n810.c
sound/soc/omap/rx51.c
sound/soc/pxa/corgi.c
sound/soc/pxa/magician.c
sound/soc/pxa/poodle.c
sound/soc/pxa/spitz.c
sound/soc/pxa/tosa.c
sound/soc/qcom/lpass-cpu.c
sound/soc/samsung/i2s.c
sound/soc/soc-dapm.c
virt/kvm/kvm_main.c

index a2bd593881cab361fa739d9a12e63ee7ba63ef50..66422d6631841d9e6eba9be63e453176ebd43f2e 100644
--- a/Documentation/devicetree/bindings/arm/omap/omap.txt
+++ b/Documentation/devicetree/bindings/arm/omap/omap.txt
@@ -23,6 +23,7 @@ Optional properties:
   during suspend.
 - ti,no-reset-on-init: When present, the module should not be reset at init
 - ti,no-idle-on-init: When present, the module should not be idled at init
+- ti,no-idle: When present, the module is never allowed to idle.
 
 Example:
 
index daf9c0f742d22e882637391539e90ea31eb788f3..c81731096a4338bcec4d43b8049a26118d10f260 100644
--- a/Documentation/virtual/kvm/mmu.txt
+++ b/Documentation/virtual/kvm/mmu.txt
@@ -358,7 +358,8 @@ In the first case there are two additional complications:
 - if CR4.SMEP is enabled: since we've turned the page into a kernel page,
   the kernel may now execute it.  We handle this by also setting spte.nx.
   If we get a user fetch or read fault, we'll change spte.u=1 and
-  spte.nx=gpte.nx back.
+  spte.nx=gpte.nx back.  For this to work, KVM forces EFER.NX to 1 when
+  shadow paging is in use.
 - if CR4.SMAP is disabled: since the page has been changed to a kernel
   page, it can not be reused when CR4.SMAP is enabled. We set
   CR4.SMAP && !CR0.WP into shadow page's role to avoid this case. Note,
index f5e6a535bc3479d37f1e76a8ed93209cb21d132b..6ee06ea47be4d1cb2f865587f5e175e3f0235c32 100644
--- a/MAINTAINERS
+++ b/MAINTAINERS
@@ -4554,6 +4554,15 @@ S:       Maintained
 F:     drivers/net/ethernet/freescale/fs_enet/
 F:     include/linux/fs_enet_pd.h
 
+FREESCALE IMX / MXC FEC DRIVER
+M:     Fugang Duan <fugang.duan@nxp.com>
+L:     netdev@vger.kernel.org
+S:     Maintained
+F:     drivers/net/ethernet/freescale/fec_main.c
+F:     drivers/net/ethernet/freescale/fec_ptp.c
+F:     drivers/net/ethernet/freescale/fec.h
+F:     Documentation/devicetree/bindings/net/fsl-fec.txt
+
 FREESCALE QUICC ENGINE LIBRARY
 L:     linuxppc-dev@lists.ozlabs.org
 S:     Orphan
@@ -6770,6 +6779,7 @@ S:        Maintained
 F:     Documentation/networking/mac80211-injection.txt
 F:     include/net/mac80211.h
 F:     net/mac80211/
+F:     drivers/net/wireless/mac80211_hwsim.[ch]
 
 MACVLAN DRIVER
 M:     Patrick McHardy <kaber@trash.net>
@@ -7389,6 +7399,17 @@ W:       https://www.myricom.com/support/downloads/myri10ge.html
 S:     Supported
 F:     drivers/net/ethernet/myricom/myri10ge/
 
+NAND FLASH SUBSYSTEM
+M:     Boris Brezillon <boris.brezillon@free-electrons.com>
+R:     Richard Weinberger <richard@nod.at>
+L:     linux-mtd@lists.infradead.org
+W:     http://www.linux-mtd.infradead.org/
+Q:     http://patchwork.ozlabs.org/project/linux-mtd/list/
+T:     git git://github.com/linux-nand/linux.git
+S:     Maintained
+F:     drivers/mtd/nand/
+F:     include/linux/mtd/nand*.h
+
 NATSEMI ETHERNET DRIVER (DP8381x)
 S:     Orphan
 F:     drivers/net/ethernet/natsemi/natsemi.c
index 2d519d2fb3a9be1a64100c85141b824a37924602..7b3ecdcdc6c1815ca6241a72b1967593a0335b41 100644
--- a/Makefile
+++ b/Makefile
@@ -1,7 +1,7 @@
 VERSION = 4
 PATCHLEVEL = 5
 SUBLEVEL = 0
-EXTRAVERSION = -rc7
+EXTRAVERSION =
 NAME = Blurry Fish Butt
 
 # *DOCUMENTATION*
index 23fc670c0427710168ce41ef012f9e37b8218eb6..5c21b236721fcb9d6e2b5055cbc89ae9527df7f1 100644
--- a/arch/arm/boot/dts/armada-xp-axpwifiap.dts
+++ b/arch/arm/boot/dts/armada-xp-axpwifiap.dts
@@ -70,8 +70,8 @@
        soc {
                ranges = <MBUS_ID(0xf0, 0x01) 0 0 0xf1000000 0x100000
                          MBUS_ID(0x01, 0x1d) 0 0 0xfff00000 0x100000
-                         MBUS_ID(0x09, 0x09) 0 0 0xf8100000 0x10000
-                         MBUS_ID(0x09, 0x05) 0 0 0xf8110000 0x10000>;
+                         MBUS_ID(0x09, 0x09) 0 0 0xf1100000 0x10000
+                         MBUS_ID(0x09, 0x05) 0 0 0xf1110000 0x10000>;
 
                pcie-controller {
                        status = "okay";
index f774101416a5522841c475252d0a86842e475b29..ebe1d267406df5ab30e3a3189b669733eb8fcaa4 100644
--- a/arch/arm/boot/dts/armada-xp-db.dts
+++ b/arch/arm/boot/dts/armada-xp-db.dts
@@ -76,8 +76,8 @@
                ranges = <MBUS_ID(0xf0, 0x01) 0 0 0xf1000000 0x100000
                          MBUS_ID(0x01, 0x1d) 0 0 0xfff00000 0x100000
                          MBUS_ID(0x01, 0x2f) 0 0 0xf0000000 0x1000000
-                         MBUS_ID(0x09, 0x09) 0 0 0xf8100000 0x10000
-                         MBUS_ID(0x09, 0x05) 0 0 0xf8110000 0x10000>;
+                         MBUS_ID(0x09, 0x09) 0 0 0xf1100000 0x10000
+                         MBUS_ID(0x09, 0x05) 0 0 0xf1110000 0x10000>;
 
                devbus-bootcs {
                        status = "okay";
index 4878d7353069fc2720e15d76af307618daced15c..5730b875c4f51a1aa2743d8881b18d6dbc0b27cd 100644
--- a/arch/arm/boot/dts/armada-xp-gp.dts
+++ b/arch/arm/boot/dts/armada-xp-gp.dts
@@ -95,8 +95,8 @@
                ranges = <MBUS_ID(0xf0, 0x01) 0 0 0xf1000000 0x100000
                          MBUS_ID(0x01, 0x1d) 0 0 0xfff00000 0x100000
                          MBUS_ID(0x01, 0x2f) 0 0 0xf0000000 0x1000000
-                         MBUS_ID(0x09, 0x09) 0 0 0xf8100000 0x10000
-                         MBUS_ID(0x09, 0x05) 0 0 0xf8110000 0x10000>;
+                         MBUS_ID(0x09, 0x09) 0 0 0xf1100000 0x10000
+                         MBUS_ID(0x09, 0x05) 0 0 0xf1110000 0x10000>;
 
                devbus-bootcs {
                        status = "okay";
index fb9e1bbf23385b85b0b82ddb153b922b2d2e0178..8af463f26ea1e162360952dbc03c0a9bda807cd5 100644
--- a/arch/arm/boot/dts/armada-xp-lenovo-ix4-300d.dts
+++ b/arch/arm/boot/dts/armada-xp-lenovo-ix4-300d.dts
@@ -65,8 +65,8 @@
        soc {
                ranges = <MBUS_ID(0xf0, 0x01) 0 0 0xd0000000 0x100000
                        MBUS_ID(0x01, 0x1d) 0 0 0xfff00000 0x100000
-                       MBUS_ID(0x09, 0x09) 0 0 0xf8100000 0x10000
-                       MBUS_ID(0x09, 0x05) 0 0 0xf8110000 0x10000>;
+                       MBUS_ID(0x09, 0x09) 0 0 0xf1100000 0x10000
+                       MBUS_ID(0x09, 0x05) 0 0 0xf1110000 0x10000>;
 
                pcie-controller {
                        status = "okay";
index 6e9820e141f8de5a850edcd7e74f0ea2a1acd1ed..b89e6cf1271a1370aead2a000e729cefad376ab0 100644
--- a/arch/arm/boot/dts/armada-xp-linksys-mamba.dts
+++ b/arch/arm/boot/dts/armada-xp-linksys-mamba.dts
@@ -70,8 +70,8 @@
        soc {
                ranges = <MBUS_ID(0xf0, 0x01) 0 0 0xf1000000 0x100000
                          MBUS_ID(0x01, 0x1d) 0 0 0xfff00000 0x100000
-                         MBUS_ID(0x09, 0x09) 0 0 0xf8100000 0x10000
-                         MBUS_ID(0x09, 0x05) 0 0 0xf8110000 0x10000>;
+                         MBUS_ID(0x09, 0x09) 0 0 0xf1100000 0x10000
+                         MBUS_ID(0x09, 0x05) 0 0 0xf1110000 0x10000>;
 
                pcie-controller {
                        status = "okay";
index 6ab33837a2b6d0a5aaf4e48763bb838451a346bd..6522b04f4a8e7c7a759100153d7d990f1fb160d3 100644
--- a/arch/arm/boot/dts/armada-xp-matrix.dts
+++ b/arch/arm/boot/dts/armada-xp-matrix.dts
@@ -68,8 +68,8 @@
        soc {
                ranges = <MBUS_ID(0xf0, 0x01) 0 0 0xf1000000 0x100000
                          MBUS_ID(0x01, 0x1d) 0 0 0xfff00000 0x100000
-                         MBUS_ID(0x09, 0x09) 0 0 0xf8100000 0x10000
-                         MBUS_ID(0x09, 0x05) 0 0 0xf8110000 0x10000>;
+                         MBUS_ID(0x09, 0x09) 0 0 0xf1100000 0x10000
+                         MBUS_ID(0x09, 0x05) 0 0 0xf1110000 0x10000>;
 
                internal-regs {
                        serial@12000 {
index 62175a8848bc2234343a8627d7cdf0ababfee851..d19f44c709257d9a35644d71cb49a04e354c42d8 100644
--- a/arch/arm/boot/dts/armada-xp-netgear-rn2120.dts
+++ b/arch/arm/boot/dts/armada-xp-netgear-rn2120.dts
@@ -64,8 +64,8 @@
        soc {
                ranges = <MBUS_ID(0xf0, 0x01) 0 0 0xd0000000 0x100000
                          MBUS_ID(0x01, 0x1d) 0 0 0xfff00000 0x100000
-                         MBUS_ID(0x09, 0x09) 0 0 0xf8100000 0x10000
-                         MBUS_ID(0x09, 0x05) 0 0 0xf8110000 0x10000>;
+                         MBUS_ID(0x09, 0x09) 0 0 0xf1100000 0x10000
+                         MBUS_ID(0x09, 0x05) 0 0 0xf1110000 0x10000>;
 
                pcie-controller {
                        status = "okay";
index a5db17782e085662d01a22683c5c364be5c25c8c..853bd392a4fe20155ed1469a23175213814ca9dc 100644
--- a/arch/arm/boot/dts/armada-xp-openblocks-ax3-4.dts
+++ b/arch/arm/boot/dts/armada-xp-openblocks-ax3-4.dts
@@ -65,9 +65,9 @@
        soc {
                ranges = <MBUS_ID(0xf0, 0x01) 0 0 0xd0000000 0x100000
                          MBUS_ID(0x01, 0x1d) 0 0 0xfff00000 0x100000
-                         MBUS_ID(0x01, 0x2f) 0 0 0xf0000000 0x8000000
-                         MBUS_ID(0x09, 0x09) 0 0 0xf8100000 0x10000
-                         MBUS_ID(0x09, 0x05) 0 0 0xf8110000 0x10000>;
+                         MBUS_ID(0x01, 0x2f) 0 0 0xe8000000 0x8000000
+                         MBUS_ID(0x09, 0x09) 0 0 0xf1100000 0x10000
+                         MBUS_ID(0x09, 0x05) 0 0 0xf1110000 0x10000>;
 
                devbus-bootcs {
                        status = "okay";
index 2391b11dc546b859dd3ab23a700be5a8a63ea83b..d17dab0a6f513e9c04ff2675f5f9d44d7f6953b3 100644
--- a/arch/arm/boot/dts/armada-xp-synology-ds414.dts
+++ b/arch/arm/boot/dts/armada-xp-synology-ds414.dts
@@ -78,8 +78,8 @@
        soc {
                ranges = <MBUS_ID(0xf0, 0x01) 0 0 0xf1000000 0x100000
                          MBUS_ID(0x01, 0x1d) 0 0 0xfff00000 0x100000
-                         MBUS_ID(0x09, 0x09) 0 0 0xf8100000 0x10000
-                         MBUS_ID(0x09, 0x05) 0 0 0xf8110000 0x10000>;
+                         MBUS_ID(0x09, 0x09) 0 0 0xf1100000 0x10000
+                         MBUS_ID(0x09, 0x05) 0 0 0xf1110000 0x10000>;
 
                pcie-controller {
                        status = "okay";
index c4d9175b90dceab5aa64ca1973505652bf325bee..f82aa44c3ceed540fef7e291c571c2b49c5b2e1f 100644
--- a/arch/arm/boot/dts/dra7.dtsi
+++ b/arch/arm/boot/dts/dra7.dtsi
                               0x48485200 0x2E00>;
                        #address-cells = <1>;
                        #size-cells = <1>;
+
+                       /*
+                        * Do not allow gating of cpsw clock as workaround
+                        * for errata i877. Keeping internal clock disabled
+                        * causes the device switching characteristics
+                        * to degrade over time and eventually fail to meet
+                        * the data manual delay time/skew specs.
+                        */
+                       ti,no-idle;
+
                        /*
                         * rx_thresh_pend
                         * rx_pend
index e9f65fec55c0b9beacfdf694424e601b4673327e..b6d62e4cdfddaf53f8d3aa7c1768b5e83255673e 100644
--- a/arch/arm/mach-omap2/omap_hwmod.c
+++ b/arch/arm/mach-omap2/omap_hwmod.c
@@ -2200,6 +2200,11 @@ static int _enable(struct omap_hwmod *oh)
  */
 static int _idle(struct omap_hwmod *oh)
 {
+       if (oh->flags & HWMOD_NO_IDLE) {
+               oh->_int_flags |= _HWMOD_SKIP_ENABLE;
+               return 0;
+       }
+
        pr_debug("omap_hwmod: %s: idling\n", oh->name);
 
        if (oh->_state != _HWMOD_STATE_ENABLED) {
@@ -2504,6 +2509,8 @@ static int __init _init(struct omap_hwmod *oh, void *data)
                        oh->flags |= HWMOD_INIT_NO_RESET;
                if (of_find_property(np, "ti,no-idle-on-init", NULL))
                        oh->flags |= HWMOD_INIT_NO_IDLE;
+               if (of_find_property(np, "ti,no-idle", NULL))
+                       oh->flags |= HWMOD_NO_IDLE;
        }
 
        oh->_state = _HWMOD_STATE_INITIALIZED;
@@ -2630,7 +2637,7 @@ static void __init _setup_postsetup(struct omap_hwmod *oh)
         * XXX HWMOD_INIT_NO_IDLE does not belong in hwmod data -
         * it should be set by the core code as a runtime flag during startup
         */
-       if ((oh->flags & HWMOD_INIT_NO_IDLE) &&
+       if ((oh->flags & (HWMOD_INIT_NO_IDLE | HWMOD_NO_IDLE)) &&
            (postsetup_state == _HWMOD_STATE_IDLE)) {
                oh->_int_flags |= _HWMOD_SKIP_ENABLE;
                postsetup_state = _HWMOD_STATE_ENABLED;
index 76bce11c85a40c477a5b87aab919d5b8dfded5e8..7c7a31169475b72bc8fbbe2f0dabf015b395f257 100644
--- a/arch/arm/mach-omap2/omap_hwmod.h
+++ b/arch/arm/mach-omap2/omap_hwmod.h
@@ -525,6 +525,8 @@ struct omap_hwmod_omap4_prcm {
  *     or idled.
  * HWMOD_OPT_CLKS_NEEDED: The optional clocks are needed for the module to
  *     operate and they need to be handled at the same time as the main_clk.
+ * HWMOD_NO_IDLE: Do not idle the hwmod at all. Useful to handle certain
+ *     IPs like CPSW on DRA7, where clocks to this module cannot be disabled.
  */
 #define HWMOD_SWSUP_SIDLE                      (1 << 0)
 #define HWMOD_SWSUP_MSTANDBY                   (1 << 1)
@@ -541,6 +543,7 @@ struct omap_hwmod_omap4_prcm {
 #define HWMOD_SWSUP_SIDLE_ACT                  (1 << 12)
 #define HWMOD_RECONFIG_IO_CHAIN                        (1 << 13)
 #define HWMOD_OPT_CLKS_NEEDED                  (1 << 14)
+#define HWMOD_NO_IDLE                          (1 << 15)
 
 /*
  * omap_hwmod._int_flags definitions
index f5060867458024a5a61ccfd4a77330c728e13010..819aff5d593f701d1101d4cc42dbec8cedbc80aa 100644
--- a/arch/arm64/include/asm/pgtable.h
+++ b/arch/arm64/include/asm/pgtable.h
@@ -40,7 +40,7 @@
  * VMALLOC_END: extends to the available space below vmmemmap, PCI I/O space,
  *     fixed mappings and modules
  */
-#define VMEMMAP_SIZE           ALIGN((1UL << (VA_BITS - PAGE_SHIFT - 1)) * sizeof(struct page), PUD_SIZE)
+#define VMEMMAP_SIZE           ALIGN((1UL << (VA_BITS - PAGE_SHIFT)) * sizeof(struct page), PUD_SIZE)
 
 #ifndef CONFIG_KASAN
 #define VMALLOC_START          (VA_START)
@@ -52,7 +52,8 @@
 #define VMALLOC_END            (PAGE_OFFSET - PUD_SIZE - VMEMMAP_SIZE - SZ_64K)
 
 #define VMEMMAP_START          (VMALLOC_END + SZ_64K)
-#define vmemmap                        ((struct page *)VMEMMAP_START - (memstart_addr >> PAGE_SHIFT))
+#define vmemmap                        ((struct page *)VMEMMAP_START - \
+                                SECTION_ALIGN_DOWN(memstart_addr >> PAGE_SHIFT))
 
 #define FIRST_USER_ADDRESS     0UL
 
index e33fe33876ab3804f2c6dcd6c5458e576596ef24..fd10eb6638689c1262c34cca7de533453cb383ae 100644
--- a/arch/arm64/kernel/sleep.S
+++ b/arch/arm64/kernel/sleep.S
@@ -145,6 +145,10 @@ ENTRY(cpu_resume_mmu)
 ENDPROC(cpu_resume_mmu)
        .popsection
 cpu_resume_after_mmu:
+#ifdef CONFIG_KASAN
+       mov     x0, sp
+       bl      kasan_unpoison_remaining_stack
+#endif
        mov     x0, #0                  // return zero on success
        ldp     x19, x20, [sp, #16]
        ldp     x21, x22, [sp, #32]
index 82d607c3614ed8454d19f2da5d44a6cc914ee0b3..da30529bb1f65c9e3d5408b2e28ab31bc2283211 100644
--- a/arch/arm64/mm/hugetlbpage.c
+++ b/arch/arm64/mm/hugetlbpage.c
@@ -306,10 +306,6 @@ static __init int setup_hugepagesz(char *opt)
                hugetlb_add_hstate(PMD_SHIFT - PAGE_SHIFT);
        } else if (ps == PUD_SIZE) {
                hugetlb_add_hstate(PUD_SHIFT - PAGE_SHIFT);
-       } else if (ps == (PAGE_SIZE * CONT_PTES)) {
-               hugetlb_add_hstate(CONT_PTE_SHIFT);
-       } else if (ps == (PMD_SIZE * CONT_PMDS)) {
-               hugetlb_add_hstate((PMD_SHIFT + CONT_PMD_SHIFT) - PAGE_SHIFT);
        } else {
                pr_err("hugepagesz: Unsupported page size %lu K\n", ps >> 10);
                return 0;
@@ -317,13 +313,3 @@ static __init int setup_hugepagesz(char *opt)
        return 1;
 }
 __setup("hugepagesz=", setup_hugepagesz);
-
-#ifdef CONFIG_ARM64_64K_PAGES
-static __init int add_default_hugepagesz(void)
-{
-       if (size_to_hstate(CONT_PTES * PAGE_SIZE) == NULL)
-               hugetlb_add_hstate(CONT_PMD_SHIFT);
-       return 0;
-}
-arch_initcall(add_default_hugepagesz);
-#endif
index 74a3db92da1b52edc15165ac06d3b1558b8b78c0..d3da79dda629114e936af3e811348171c62a9bcd 100644
--- a/arch/mips/Kconfig
+++ b/arch/mips/Kconfig
@@ -2169,7 +2169,7 @@ config MIPS_MT_SMP
        select CPU_MIPSR2_IRQ_VI
        select CPU_MIPSR2_IRQ_EI
        select SYNC_R4K
-       select MIPS_GIC_IPI
+       select MIPS_GIC_IPI if MIPS_GIC
        select MIPS_MT
        select SMP
        select SMP_UP
@@ -2267,7 +2267,7 @@ config MIPS_VPE_APSP_API_MT
 config MIPS_CMP
        bool "MIPS CMP framework support (DEPRECATED)"
        depends on SYS_SUPPORTS_MIPS_CMP && !CPU_MIPSR6
-       select MIPS_GIC_IPI
+       select MIPS_GIC_IPI if MIPS_GIC
        select SMP
        select SYNC_R4K
        select SYS_SUPPORTS_SMP
@@ -2287,7 +2287,7 @@ config MIPS_CPS
        select MIPS_CM
        select MIPS_CPC
        select MIPS_CPS_PM if HOTPLUG_CPU
-       select MIPS_GIC_IPI
+       select MIPS_GIC_IPI if MIPS_GIC
        select SMP
        select SYNC_R4K if (CEVT_R4K || CSRC_R4K)
        select SYS_SUPPORTS_HOTPLUG_CPU
@@ -2306,6 +2306,7 @@ config MIPS_CPS_PM
        bool
 
 config MIPS_GIC_IPI
+       depends on MIPS_GIC
        bool
 
 config MIPS_CM
index 408799a839b42f2d9d7d1232bd7ea4efda0cf2d4..f7521142deda55fb439fa0857a2383b4dad7cc83 100644
--- a/arch/mips/boot/compressed/uart-16550.c
+++ b/arch/mips/boot/compressed/uart-16550.c
@@ -17,7 +17,7 @@
 #define PORT(offset) (CKSEG1ADDR(AR7_REGS_UART0) + (4 * offset))
 #endif
 
-#ifdef CONFIG_MACH_JZ4740
+#if defined(CONFIG_MACH_JZ4740) || defined(CONFIG_MACH_JZ4780)
 #include <asm/mach-jz4740/base.h>
 #define PORT(offset) (CKSEG1ADDR(JZ4740_UART0_BASE_ADDR) + (4 * offset))
 #endif
index bd4385a8e6e86f7fbb9ca6d988f5eee155b9a8c7..2b521e07b8601a2c80e888b3064009779effa78b 100644
--- a/arch/mips/kernel/smp.c
+++ b/arch/mips/kernel/smp.c
@@ -121,6 +121,7 @@ static inline void calculate_cpu_foreign_map(void)
        cpumask_t temp_foreign_map;
 
        /* Re-calculate the mask */
+       cpumask_clear(&temp_foreign_map);
        for_each_online_cpu(i) {
                core_present = 0;
                for_each_cpu(k, &temp_foreign_map)
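The arch/mips/kernel/smp.c hunk above adds cpumask_clear() because temp_foreign_map is an on-stack mask that was being OR-ed into without first being zeroed. A generic, self-contained C sketch of that bug class (stand-in code, not kernel code):

#include <string.h>

#define NWORDS 4   /* stand-in for a fixed-size CPU mask */

/* OR the bits of src into dst, like accumulating CPUs into a mask. */
static void mask_or(unsigned long dst[NWORDS], const unsigned long src[NWORDS])
{
        for (int i = 0; i < NWORDS; i++)
                dst[i] |= src[i];
}

/* Recompute a mask from scratch: the memset() is the step the fix adds;
 * without it, stale stack contents leak into the result. */
void recalc_mask(const unsigned long parts[][NWORDS], int n,
                 unsigned long out[NWORDS])
{
        unsigned long tmp[NWORDS];

        memset(tmp, 0, sizeof(tmp));
        for (int i = 0; i < n; i++)
                mask_or(tmp, parts[i]);
        memcpy(out, tmp, sizeof(tmp));
}
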
index 6ee26de9a1ded02e0e08f376fd612049eedee0b4..25ae2c9913c39c2fae34fb60dd7ee655ba821b6e 100644
--- a/arch/powerpc/kvm/book3s_hv_rmhandlers.S
+++ b/arch/powerpc/kvm/book3s_hv_rmhandlers.S
@@ -1370,6 +1370,20 @@ END_FTR_SECTION_IFCLR(CPU_FTR_ARCH_207S)
        std     r6, VCPU_ACOP(r9)
        stw     r7, VCPU_GUEST_PID(r9)
        std     r8, VCPU_WORT(r9)
+       /*
+        * Restore various registers to 0, where non-zero values
+        * set by the guest could disrupt the host.
+        */
+       li      r0, 0
+       mtspr   SPRN_IAMR, r0
+       mtspr   SPRN_CIABR, r0
+       mtspr   SPRN_DAWRX, r0
+       mtspr   SPRN_TCSCR, r0
+       mtspr   SPRN_WORT, r0
+       /* Set MMCRS to 1<<31 to freeze and disable the SPMC counters */
+       li      r0, 1
+       sldi    r0, r0, 31
+       mtspr   SPRN_MMCRS, r0
 8:
 
        /* Save and reset AMR and UAMOR before turning on the MMU */
index fb1b93ea3e3fead0efde9f30e819c55eecb88da9..e485817f7b1a97683e64ab04d1f7444e1aae2e1d 100644
--- a/arch/s390/include/asm/mmu_context.h
+++ b/arch/s390/include/asm/mmu_context.h
 static inline int init_new_context(struct task_struct *tsk,
                                   struct mm_struct *mm)
 {
+       spin_lock_init(&mm->context.list_lock);
+       INIT_LIST_HEAD(&mm->context.pgtable_list);
+       INIT_LIST_HEAD(&mm->context.gmap_list);
        cpumask_clear(&mm->context.cpu_attach_mask);
        atomic_set(&mm->context.attach_count, 0);
        mm->context.flush_mm = 0;
-       mm->context.asce_bits = _ASCE_TABLE_LENGTH | _ASCE_USER_BITS;
-       mm->context.asce_bits |= _ASCE_TYPE_REGION3;
 #ifdef CONFIG_PGSTE
        mm->context.alloc_pgste = page_table_allocate_pgste;
        mm->context.has_pgste = 0;
        mm->context.use_skey = 0;
 #endif
-       mm->context.asce_limit = STACK_TOP_MAX;
+       if (mm->context.asce_limit == 0) {
+               /* context created by exec, set asce limit to 4TB */
+               mm->context.asce_bits = _ASCE_TABLE_LENGTH |
+                       _ASCE_USER_BITS | _ASCE_TYPE_REGION3;
+               mm->context.asce_limit = STACK_TOP_MAX;
+       } else if (mm->context.asce_limit == (1UL << 31)) {
+               mm_inc_nr_pmds(mm);
+       }
        crst_table_init((unsigned long *) mm->pgd, pgd_entry_type(mm));
        return 0;
 }
@@ -111,8 +119,6 @@ static inline void activate_mm(struct mm_struct *prev,
 static inline void arch_dup_mmap(struct mm_struct *oldmm,
                                 struct mm_struct *mm)
 {
-       if (oldmm->context.asce_limit < mm->context.asce_limit)
-               crst_table_downgrade(mm, oldmm->context.asce_limit);
 }
 
 static inline void arch_exit_mmap(struct mm_struct *mm)
index 7b7858f158b4574b549ab99648f977a3e249068c..d7cc79fb6191117a0ee0646ae6bbdf3a92a58132 100644
--- a/arch/s390/include/asm/pgalloc.h
+++ b/arch/s390/include/asm/pgalloc.h
@@ -100,12 +100,26 @@ static inline void pud_populate(struct mm_struct *mm, pud_t *pud, pmd_t *pmd)
 
 static inline pgd_t *pgd_alloc(struct mm_struct *mm)
 {
-       spin_lock_init(&mm->context.list_lock);
-       INIT_LIST_HEAD(&mm->context.pgtable_list);
-       INIT_LIST_HEAD(&mm->context.gmap_list);
-       return (pgd_t *) crst_table_alloc(mm);
+       unsigned long *table = crst_table_alloc(mm);
+
+       if (!table)
+               return NULL;
+       if (mm->context.asce_limit == (1UL << 31)) {
+               /* Forking a compat process with 2 page table levels */
+               if (!pgtable_pmd_page_ctor(virt_to_page(table))) {
+                       crst_table_free(mm, table);
+                       return NULL;
+               }
+       }
+       return (pgd_t *) table;
+}
+
+static inline void pgd_free(struct mm_struct *mm, pgd_t *pgd)
+{
+       if (mm->context.asce_limit == (1UL << 31))
+               pgtable_pmd_page_dtor(virt_to_page(pgd));
+       crst_table_free(mm, (unsigned long *) pgd);
 }
-#define pgd_free(mm, pgd) crst_table_free(mm, (unsigned long *) pgd)
 
 static inline void pmd_populate(struct mm_struct *mm,
                                pmd_t *pmd, pgtable_t pte)
index c5febe84eba633385cd7a8c09bee87e484c2e05c..03c2b469c4725172e4c6750cb8bcb0fb4edea075 100644
--- a/arch/s390/kernel/head64.S
+++ b/arch/s390/kernel/head64.S
@@ -16,7 +16,7 @@
 
 __HEAD
 ENTRY(startup_continue)
-       tm      __LC_STFLE_FAC_LIST+6,0x80      # LPP available ?
+       tm      __LC_STFLE_FAC_LIST+5,0x80      # LPP available ?
        jz      0f
        xc      __LC_LPP+1(7,0),__LC_LPP+1      # clear lpp and current_pid
        mvi     __LC_LPP,0x80                   #   and set LPP_MAGIC
index 4af21c771f9b3925bf860d0dc86cebcc803cf965..03dfe9c667f4eb944705787e54ff7e6ac3c08afb 100644
--- a/arch/s390/kvm/kvm-s390.c
+++ b/arch/s390/kvm/kvm-s390.c
@@ -2381,7 +2381,7 @@ int kvm_s390_store_status_unloaded(struct kvm_vcpu *vcpu, unsigned long gpa)
 
        /* manually convert vector registers if necessary */
        if (MACHINE_HAS_VX) {
-               convert_vx_to_fp(fprs, current->thread.fpu.vxrs);
+               convert_vx_to_fp(fprs, (__vector128 *) vcpu->run->s.regs.vrs);
                rc = write_guest_abs(vcpu, gpa + __LC_FPREGS_SAVE_AREA,
                                     fprs, 128);
        } else {
index af30fdeb140da75691e91c52446f7f1bb7fe2e93..f23cd8c80b1c818057053e831ec704ab4006fba4 100644
--- a/arch/x86/include/asm/fpu/xstate.h
+++ b/arch/x86/include/asm/fpu/xstate.h
 
 /* Supported features which support lazy state saving */
 #define XFEATURE_MASK_LAZY     (XFEATURE_MASK_FP | \
-                                XFEATURE_MASK_SSE)
-
-/* Supported features which require eager state saving */
-#define XFEATURE_MASK_EAGER    (XFEATURE_MASK_BNDREGS | \
-                                XFEATURE_MASK_BNDCSR | \
+                                XFEATURE_MASK_SSE | \
                                 XFEATURE_MASK_YMM | \
                                 XFEATURE_MASK_OPMASK | \
                                 XFEATURE_MASK_ZMM_Hi256 | \
                                 XFEATURE_MASK_Hi16_ZMM)
 
+/* Supported features which require eager state saving */
+#define XFEATURE_MASK_EAGER    (XFEATURE_MASK_BNDREGS | XFEATURE_MASK_BNDCSR)
+
 /* All currently supported features */
 #define XCNTXT_MASK    (XFEATURE_MASK_LAZY | XFEATURE_MASK_EAGER)
 
index d25097c3fc1d1af8af35c156f05121f9f4d46a94..d5804adfa6da6c3d495b124faf47c288e2c0c6cf 100644
--- a/arch/x86/kernel/fpu/core.c
+++ b/arch/x86/kernel/fpu/core.c
@@ -409,8 +409,10 @@ static inline void copy_init_fpstate_to_fpregs(void)
 {
        if (use_xsave())
                copy_kernel_to_xregs(&init_fpstate.xsave, -1);
-       else
+       else if (static_cpu_has(X86_FEATURE_FXSR))
                copy_kernel_to_fxregs(&init_fpstate.fxsave);
+       else
+               copy_kernel_to_fregs(&init_fpstate.fsave);
 }
 
 /*
index 6d9f0a7ef4c8e5da3d596444242de7478d7b9356..bd08fb77073d833ec8f1e27bc67d20fa37b559dc 100644
--- a/arch/x86/kernel/fpu/init.c
+++ b/arch/x86/kernel/fpu/init.c
@@ -78,13 +78,15 @@ static void fpu__init_system_early_generic(struct cpuinfo_x86 *c)
        cr0 &= ~(X86_CR0_TS | X86_CR0_EM);
        write_cr0(cr0);
 
-       asm volatile("fninit ; fnstsw %0 ; fnstcw %1"
-                    : "+m" (fsw), "+m" (fcw));
+       if (!test_bit(X86_FEATURE_FPU, (unsigned long *)cpu_caps_cleared)) {
+               asm volatile("fninit ; fnstsw %0 ; fnstcw %1"
+                            : "+m" (fsw), "+m" (fcw));
 
-       if (fsw == 0 && (fcw & 0x103f) == 0x003f)
-               set_cpu_cap(c, X86_FEATURE_FPU);
-       else
-               clear_cpu_cap(c, X86_FEATURE_FPU);
+               if (fsw == 0 && (fcw & 0x103f) == 0x003f)
+                       set_cpu_cap(c, X86_FEATURE_FPU);
+               else
+                       clear_cpu_cap(c, X86_FEATURE_FPU);
+       }
 
 #ifndef CONFIG_MATH_EMULATION
        if (!cpu_has_fpu) {
@@ -132,7 +134,7 @@ static void __init fpu__init_system_generic(void)
         * Set up the legacy init FPU context. (xstate init might overwrite this
         * with a more modern format, if the CPU supports it.)
         */
-       fpstate_init_fxstate(&init_fpstate.fxsave);
+       fpstate_init(&init_fpstate);
 
        fpu__init_system_mxcsr();
 }
@@ -300,12 +302,6 @@ u64 __init fpu__get_supported_xfeatures_mask(void)
 static void __init fpu__clear_eager_fpu_features(void)
 {
        setup_clear_cpu_cap(X86_FEATURE_MPX);
-       setup_clear_cpu_cap(X86_FEATURE_AVX);
-       setup_clear_cpu_cap(X86_FEATURE_AVX2);
-       setup_clear_cpu_cap(X86_FEATURE_AVX512F);
-       setup_clear_cpu_cap(X86_FEATURE_AVX512PF);
-       setup_clear_cpu_cap(X86_FEATURE_AVX512ER);
-       setup_clear_cpu_cap(X86_FEATURE_AVX512CD);
 }
 
 /*
index 95a955de5964bcc3f4aa6791a004e29b28504c13..1e7a49bfc94fb323cbb11782693cab89743f1133 100644
--- a/arch/x86/kvm/mmu.c
+++ b/arch/x86/kvm/mmu.c
@@ -3721,13 +3721,15 @@ static void reset_rsvds_bits_mask_ept(struct kvm_vcpu *vcpu,
 void
 reset_shadow_zero_bits_mask(struct kvm_vcpu *vcpu, struct kvm_mmu *context)
 {
+       bool uses_nx = context->nx || context->base_role.smep_andnot_wp;
+
        /*
         * Passing "true" to the last argument is okay; it adds a check
         * on bit 8 of the SPTEs which KVM doesn't use anyway.
         */
        __reset_rsvds_bits_mask(vcpu, &context->shadow_zero_check,
                                boot_cpu_data.x86_phys_bits,
-                               context->shadow_root_level, context->nx,
+                               context->shadow_root_level, uses_nx,
                                guest_cpuid_has_gbpages(vcpu), is_pse(vcpu),
                                true);
 }
index 0ff453749a909b9f0140b5de5d5d2dbb133e99d9..9bd8f44baded2318e8b5bc2a4773068a45a8723b 100644
--- a/arch/x86/kvm/vmx.c
+++ b/arch/x86/kvm/vmx.c
@@ -1813,6 +1813,13 @@ static void add_atomic_switch_msr(struct vcpu_vmx *vmx, unsigned msr,
                        return;
                }
                break;
+       case MSR_IA32_PEBS_ENABLE:
+               /* PEBS needs a quiescent period after being disabled (to write
+                * a record).  Disabling PEBS through VMX MSR swapping doesn't
+                * provide that period, so a CPU could write host's record into
+                * guest's memory.
+                */
+               wrmsrl(MSR_IA32_PEBS_ENABLE, 0);
        }
 
        for (i = 0; i < m->nr; ++i)
@@ -1850,26 +1857,31 @@ static void reload_tss(void)
 
 static bool update_transition_efer(struct vcpu_vmx *vmx, int efer_offset)
 {
-       u64 guest_efer;
-       u64 ignore_bits;
+       u64 guest_efer = vmx->vcpu.arch.efer;
+       u64 ignore_bits = 0;
 
-       guest_efer = vmx->vcpu.arch.efer;
+       if (!enable_ept) {
+               /*
+                * NX is needed to handle CR0.WP=1, CR4.SMEP=1.  Testing
+                * host CPUID is more efficient than testing guest CPUID
+                * or CR4.  Host SMEP is anyway a requirement for guest SMEP.
+                */
+               if (boot_cpu_has(X86_FEATURE_SMEP))
+                       guest_efer |= EFER_NX;
+               else if (!(guest_efer & EFER_NX))
+                       ignore_bits |= EFER_NX;
+       }
 
        /*
-        * NX is emulated; LMA and LME handled by hardware; SCE meaningless
-        * outside long mode
+        * LMA and LME handled by hardware; SCE meaningless outside long mode.
         */
-       ignore_bits = EFER_NX | EFER_SCE;
+       ignore_bits |= EFER_SCE;
 #ifdef CONFIG_X86_64
        ignore_bits |= EFER_LMA | EFER_LME;
        /* SCE is meaningful only in long mode on Intel */
        if (guest_efer & EFER_LMA)
                ignore_bits &= ~(u64)EFER_SCE;
 #endif
-       guest_efer &= ~ignore_bits;
-       guest_efer |= host_efer & ignore_bits;
-       vmx->guest_msrs[efer_offset].data = guest_efer;
-       vmx->guest_msrs[efer_offset].mask = ~ignore_bits;
 
        clear_atomic_switch_msr(vmx, MSR_EFER);
 
@@ -1880,16 +1892,21 @@ static bool update_transition_efer(struct vcpu_vmx *vmx, int efer_offset)
         */
        if (cpu_has_load_ia32_efer ||
            (enable_ept && ((vmx->vcpu.arch.efer ^ host_efer) & EFER_NX))) {
-               guest_efer = vmx->vcpu.arch.efer;
                if (!(guest_efer & EFER_LMA))
                        guest_efer &= ~EFER_LME;
                if (guest_efer != host_efer)
                        add_atomic_switch_msr(vmx, MSR_EFER,
                                              guest_efer, host_efer);
                return false;
-       }
+       } else {
+               guest_efer &= ~ignore_bits;
+               guest_efer |= host_efer & ignore_bits;
 
-       return true;
+               vmx->guest_msrs[efer_offset].data = guest_efer;
+               vmx->guest_msrs[efer_offset].mask = ~ignore_bits;
+
+               return true;
+       }
 }
 
 static unsigned long segment_base(u16 selector)
index e912b2f6d36e5392b33bf6749f19a53cff6d7a06..2f07c291dcc855af5c483c55133d91a4dff21c92 100644
--- a/arch/x86/lib/delay.c
+++ b/arch/x86/lib/delay.c
@@ -102,7 +102,7 @@ static void delay_mwaitx(unsigned long __loops)
                 * Use cpu_tss as a cacheline-aligned, seldomly
                 * accessed per-cpu variable as the monitor target.
                 */
-               __monitorx(this_cpu_ptr(&cpu_tss), 0, 0);
+               __monitorx(raw_cpu_ptr(&cpu_tss), 0, 0);
 
                /*
                 * AMD, like Intel, supports the EAX hint and EAX=0xf
index 2d66db8f80f992d3b0609373502031aacf91f161..ed30e79347e86377198009dcafb1c0b26a9865df 100644
--- a/arch/x86/platform/efi/quirks.c
+++ b/arch/x86/platform/efi/quirks.c
@@ -130,6 +130,27 @@ efi_status_t efi_query_variable_store(u32 attributes, unsigned long size)
 }
 EXPORT_SYMBOL_GPL(efi_query_variable_store);
 
+/*
+ * Helper function for efi_reserve_boot_services() to figure out if we
+ * can free regions in efi_free_boot_services().
+ *
+ * Use this function to ensure we do not free regions owned by somebody
+ * else. We must only reserve (and then free) regions:
+ *
+ * - Not within any part of the kernel
+ * - Not the BIOS reserved area (E820_RESERVED, E820_NVS, etc)
+ */
+static bool can_free_region(u64 start, u64 size)
+{
+       if (start + size > __pa_symbol(_text) && start <= __pa_symbol(_end))
+               return false;
+
+       if (!e820_all_mapped(start, start+size, E820_RAM))
+               return false;
+
+       return true;
+}
+
 /*
  * The UEFI specification makes it clear that the operating system is free to do
  * whatever it wants with boot services code after ExitBootServices() has been
@@ -147,26 +168,50 @@ void __init efi_reserve_boot_services(void)
                efi_memory_desc_t *md = p;
                u64 start = md->phys_addr;
                u64 size = md->num_pages << EFI_PAGE_SHIFT;
+               bool already_reserved;
 
                if (md->type != EFI_BOOT_SERVICES_CODE &&
                    md->type != EFI_BOOT_SERVICES_DATA)
                        continue;
-               /* Only reserve where possible:
-                * - Not within any already allocated areas
-                * - Not over any memory area (really needed, if above?)
-                * - Not within any part of the kernel
-                * - Not the bios reserved area
-               */
-               if ((start + size > __pa_symbol(_text)
-                               && start <= __pa_symbol(_end)) ||
-                       !e820_all_mapped(start, start+size, E820_RAM) ||
-                       memblock_is_region_reserved(start, size)) {
-                       /* Could not reserve, skip it */
-                       md->num_pages = 0;
-                       memblock_dbg("Could not reserve boot range [0x%010llx-0x%010llx]\n",
-                                    start, start+size-1);
-               } else
+
+               already_reserved = memblock_is_region_reserved(start, size);
+
+               /*
+                * Because the following memblock_reserve() is paired
+                * with free_bootmem_late() for this region in
+                * efi_free_boot_services(), we must be extremely
+                * careful not to reserve, and subsequently free,
+                * critical regions of memory (like the kernel image) or
+                * those regions that somebody else has already
+                * reserved.
+                *
+                * A good example of a critical region that must not be
+                * freed is page zero (first 4Kb of memory), which may
+                * contain boot services code/data but is marked
+                * E820_RESERVED by trim_bios_range().
+                */
+               if (!already_reserved) {
                        memblock_reserve(start, size);
+
+                       /*
+                        * If we are the first to reserve the region, no
+                        * one else cares about it. We own it and can
+                        * free it later.
+                        */
+                       if (can_free_region(start, size))
+                               continue;
+               }
+
+               /*
+                * We don't own the region. We must not free it.
+                *
+                * Setting this bit for a boot services region really
+                * doesn't make sense as far as the firmware is
+                * concerned, but it does provide us with a way to tag
+                * those regions that must not be paired with
+                * free_bootmem_late().
+                */
+               md->attribute |= EFI_MEMORY_RUNTIME;
        }
 }
 
@@ -183,8 +228,8 @@ void __init efi_free_boot_services(void)
                    md->type != EFI_BOOT_SERVICES_DATA)
                        continue;
 
-               /* Could not reserve boot area */
-               if (!size)
+               /* Do not free, someone else owns it: */
+               if (md->attribute & EFI_MEMORY_RUNTIME)
                        continue;
 
                free_bootmem_late(start, size);
index cb648a49543a55a091b7ba756c628d7a076ac428..edeb2d1d99bec7b605a1385d1c5e8405da6bef40 100644
--- a/drivers/acpi/Makefile
+++ b/drivers/acpi/Makefile
@@ -43,6 +43,7 @@ acpi-y                                += pci_root.o pci_link.o pci_irq.o
 acpi-y                         += acpi_lpss.o acpi_apd.o
 acpi-y                         += acpi_platform.o
 acpi-y                         += acpi_pnp.o
+acpi-$(CONFIG_ARM_AMBA)        += acpi_amba.o
 acpi-y                         += int340x_thermal.o
 acpi-y                         += power.o
 acpi-y                         += event.o
diff --git a/drivers/acpi/acpi_amba.c b/drivers/acpi/acpi_amba.c
new file mode 100644
index 0000000..2a61b54
--- /dev/null
+++ b/drivers/acpi/acpi_amba.c
@@ -0,0 +1,122 @@
+
+/*
+ * ACPI support for platform bus type.
+ *
+ * Copyright (C) 2015, Linaro Ltd
+ * Author: Graeme Gregory <graeme.gregory@linaro.org>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/acpi.h>
+#include <linux/amba/bus.h>
+#include <linux/clkdev.h>
+#include <linux/clk-provider.h>
+#include <linux/device.h>
+#include <linux/err.h>
+#include <linux/ioport.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+
+#include "internal.h"
+
+static const struct acpi_device_id amba_id_list[] = {
+       {"ARMH0061", 0}, /* PL061 GPIO Device */
+       {"", 0},
+};
+
+static void amba_register_dummy_clk(void)
+{
+       static struct clk *amba_dummy_clk;
+
+       /* If clock already registered */
+       if (amba_dummy_clk)
+               return;
+
+       amba_dummy_clk = clk_register_fixed_rate(NULL, "apb_pclk", NULL,
+                                               CLK_IS_ROOT, 0);
+       clk_register_clkdev(amba_dummy_clk, "apb_pclk", NULL);
+}
+
+static int amba_handler_attach(struct acpi_device *adev,
+                               const struct acpi_device_id *id)
+{
+       struct amba_device *dev;
+       struct resource_entry *rentry;
+       struct list_head resource_list;
+       bool address_found = false;
+       int irq_no = 0;
+       int ret;
+
+       /* If the ACPI node already has a physical device attached, skip it. */
+       if (adev->physical_node_count)
+               return 0;
+
+       dev = amba_device_alloc(dev_name(&adev->dev), 0, 0);
+       if (!dev) {
+               dev_err(&adev->dev, "%s(): amba_device_alloc() failed\n",
+                       __func__);
+               return -ENOMEM;
+       }
+
+       INIT_LIST_HEAD(&resource_list);
+       ret = acpi_dev_get_resources(adev, &resource_list, NULL, NULL);
+       if (ret < 0)
+               goto err_free;
+
+       list_for_each_entry(rentry, &resource_list, node) {
+               switch (resource_type(rentry->res)) {
+               case IORESOURCE_MEM:
+                       if (!address_found) {
+                               dev->res = *rentry->res;
+                               address_found = true;
+                       }
+                       break;
+               case IORESOURCE_IRQ:
+                       if (irq_no < AMBA_NR_IRQS)
+                               dev->irq[irq_no++] = rentry->res->start;
+                       break;
+               default:
+                       dev_warn(&adev->dev, "Invalid resource\n");
+                       break;
+               }
+       }
+
+       acpi_dev_free_resource_list(&resource_list);
+
+       /*
+        * If the ACPI node has a parent and that parent has a physical device
+        * attached to it, that physical device should be the parent of
+        * the amba device we are about to create.
+        */
+       if (adev->parent)
+               dev->dev.parent = acpi_get_first_physical_node(adev->parent);
+
+       ACPI_COMPANION_SET(&dev->dev, adev);
+
+       ret = amba_device_add(dev, &iomem_resource);
+       if (ret) {
+               dev_err(&adev->dev, "%s(): amba_device_add() failed (%d)\n",
+                      __func__, ret);
+               goto err_free;
+       }
+
+       return 1;
+
+err_free:
+       amba_device_put(dev);
+       return ret;
+}
+
+static struct acpi_scan_handler amba_handler = {
+       .ids = amba_id_list,
+       .attach = amba_handler_attach,
+};
+
+void __init acpi_amba_init(void)
+{
+       amba_register_dummy_clk();
+       acpi_scan_add_handler(&amba_handler);
+}
index d507cf6deda050fde79d3b6237fdbf410407d5d3..d0aad06b38720555a7da9138d5bc7f307100ca43 100644
--- a/drivers/acpi/acpi_apd.c
+++ b/drivers/acpi/acpi_apd.c
@@ -143,6 +143,7 @@ static const struct acpi_device_id acpi_apd_device_ids[] = {
        /* Generic apd devices */
 #ifdef CONFIG_X86_AMD_PLATFORM_DEVICE
        { "AMD0010", APD_ADDR(cz_i2c_desc) },
+       { "AMDI0010", APD_ADDR(cz_i2c_desc) },
        { "AMD0020", APD_ADDR(cz_uart_desc) },
        { "AMD0030", },
 #endif
index 296b7a14893aabba895c578e8671e36b34875a37..c3af1088bf6b88d322e72c8071d1e05a6d074460 100644
--- a/drivers/acpi/acpi_platform.c
+++ b/drivers/acpi/acpi_platform.c
@@ -43,7 +43,6 @@ static const struct acpi_device_id forbidden_id_list[] = {
 struct platform_device *acpi_create_platform_device(struct acpi_device *adev)
 {
        struct platform_device *pdev = NULL;
-       struct acpi_device *acpi_parent;
        struct platform_device_info pdevinfo;
        struct resource_entry *rentry;
        struct list_head resource_list;
@@ -82,22 +81,8 @@ struct platform_device *acpi_create_platform_device(struct acpi_device *adev)
         * attached to it, that physical device should be the parent of the
         * platform device we are about to create.
         */
-       pdevinfo.parent = NULL;
-       acpi_parent = adev->parent;
-       if (acpi_parent) {
-               struct acpi_device_physical_node *entry;
-               struct list_head *list;
-
-               mutex_lock(&acpi_parent->physical_node_lock);
-               list = &acpi_parent->physical_node_list;
-               if (!list_empty(list)) {
-                       entry = list_first_entry(list,
-                                       struct acpi_device_physical_node,
-                                       node);
-                       pdevinfo.parent = entry->dev;
-               }
-               mutex_unlock(&acpi_parent->physical_node_lock);
-       }
+       pdevinfo.parent = adev->parent ?
+               acpi_get_first_physical_node(adev->parent) : NULL;
        pdevinfo.name = dev_name(&adev->dev);
        pdevinfo.id = -1;
        pdevinfo.res = resources;
index 6979186dbd4b45bd19b60b51d4fd76c76abd683f..b5e54f2da53de81a9d31448265d3633e47f696b6 100644
--- a/drivers/acpi/acpi_processor.c
+++ b/drivers/acpi/acpi_processor.c
@@ -514,7 +514,24 @@ static struct acpi_scan_handler processor_handler = {
        },
 };
 
+static int acpi_processor_container_attach(struct acpi_device *dev,
+                                          const struct acpi_device_id *id)
+{
+       return 1;
+}
+
+static const struct acpi_device_id processor_container_ids[] = {
+       { ACPI_PROCESSOR_CONTAINER_HID, },
+       { }
+};
+
+static struct acpi_scan_handler processor_container_handler = {
+       .ids = processor_container_ids,
+       .attach = acpi_processor_container_attach,
+};
+
 void __init acpi_processor_init(void)
 {
        acpi_scan_add_handler_with_hotplug(&processor_handler, "processor");
+       acpi_scan_add_handler(&processor_container_handler);
 }
index 55c8197036f31634439bb363f7d6125ef44f08a4..51b073b68f161799119af908957b1254d7c68781 100644
--- a/drivers/acpi/acpica/acglobal.h
+++ b/drivers/acpi/acpica/acglobal.h
@@ -165,7 +165,7 @@ ACPI_GLOBAL(u8, acpi_gbl_next_owner_id_offset);
 
 /* Initialization sequencing */
 
-ACPI_INIT_GLOBAL(u8, acpi_gbl_reg_methods_enabled, FALSE);
+ACPI_INIT_GLOBAL(u8, acpi_gbl_namespace_initialized, FALSE);
 
 /* Misc */
 
index e4977fac9c1dba3b87a21124f2efe8b506f898e6..9562a10a1a18855c70fc1436605a3a178961ebf3 100644
--- a/drivers/acpi/acpica/aclocal.h
+++ b/drivers/acpi/acpica/aclocal.h
@@ -85,7 +85,7 @@ union acpi_parse_object;
 #define ACPI_MTX_MEMORY                 5      /* Debug memory tracking lists */
 
 #define ACPI_MAX_MUTEX                  5
-#define ACPI_NUM_MUTEX                  ACPI_MAX_MUTEX+1
+#define ACPI_NUM_MUTEX                  (ACPI_MAX_MUTEX+1)
 
 /* Lock structure for reader/writer interfaces */
 
@@ -103,11 +103,11 @@ struct acpi_rw_lock {
 #define ACPI_LOCK_HARDWARE              1
 
 #define ACPI_MAX_LOCK                   1
-#define ACPI_NUM_LOCK                   ACPI_MAX_LOCK+1
+#define ACPI_NUM_LOCK                   (ACPI_MAX_LOCK+1)
 
 /* This Thread ID means that the mutex is not in use (unlocked) */
 
-#define ACPI_MUTEX_NOT_ACQUIRED         (acpi_thread_id) 0
+#define ACPI_MUTEX_NOT_ACQUIRED         ((acpi_thread_id) 0)
 
 /* This Thread ID means an invalid thread ID */
 
index 9684ed61284df5739bcd80215974d5c649f747fd..022d69cb345a07014d73f674e5a55dd189042952 100644
--- a/drivers/acpi/acpica/acnamesp.h
+++ b/drivers/acpi/acpica/acnamesp.h
@@ -88,7 +88,7 @@
  */
 acpi_status acpi_ns_initialize_objects(void);
 
-acpi_status acpi_ns_initialize_devices(void);
+acpi_status acpi_ns_initialize_devices(u32 flags);
 
 /*
  * nsload -  Namespace loading
index 52f6bee52d474da83eb77d834bfa8fc18ee73f35..5faeab41e3028c65d472f3dcd07ba00cf06162c6 100644
--- a/drivers/acpi/acpica/acpredef.h
+++ b/drivers/acpi/acpica/acpredef.h
@@ -1125,7 +1125,7 @@ const union acpi_predefined_info acpi_gbl_resource_names[] = {
        PACKAGE_INFO(0, 0, 0, 0, 0, 0)  /* Table terminator */
 };
 
-static const union acpi_predefined_info acpi_gbl_scope_names[] = {
+const union acpi_predefined_info acpi_gbl_scope_names[] = {
        {{"_GPE", 0, 0}},
        {{"_PR_", 0, 0}},
        {{"_SB_", 0, 0}},
index 7ec62c46128013c43104190d868764fb58e83541..772178c96ccf25a90a003135bb0f33ab89bcf104 100644
--- a/drivers/acpi/acpica/dbcmds.c
+++ b/drivers/acpi/acpica/dbcmds.c
@@ -348,7 +348,7 @@ void acpi_db_display_table_info(char *table_arg)
                } else {
                        /* If the pointer is null, the table has been unloaded */
 
-                       ACPI_INFO((AE_INFO, "%4.4s - Table has been unloaded",
+                       ACPI_INFO(("%4.4s - Table has been unloaded",
                                   table_desc->signature.ascii));
                }
        }
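This and the following ACPICA hunks drop the AE_INFO argument from ACPI_INFO() calls. Those calls use the classic double-parenthesis idiom, where the macro forwards a whole parenthesized argument list, so call sites can be reworded without changing the macro's arity. A self-contained sketch of the idiom (illustrative definitions, not the exact acoutput.h ones):

#include <stdarg.h>
#include <stdio.h>

static void acpi_info(const char *fmt, ...)
{
        va_list ap;

        va_start(ap, fmt);
        vprintf(fmt, ap);
        va_end(ap);
        putchar('\n');
}

/* The macro argument keeps its own parentheses, so the expansion is a call. */
#define ACPI_INFO(plist) acpi_info plist

void example(unsigned int count)
{
        /* Expands to: acpi_info ("Enabled %u new GPEs", count); */
        ACPI_INFO(("Enabled %u new GPEs", count));
}
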
index 9fee88f1c6540fa6277e9349f8690d1ca138b7b5..68f4e0f4b095ef0e1f7753d2680a3dc3e1ecf305 100644
--- a/drivers/acpi/acpica/dbconvert.c
+++ b/drivers/acpi/acpica/dbconvert.c
@@ -408,7 +408,7 @@ void acpi_db_dump_pld_buffer(union acpi_object *obj_desc)
 
        new_buffer = acpi_db_encode_pld_buffer(pld_info);
        if (!new_buffer) {
-               return;
+               goto exit;
        }
 
        /* The two bit-packed buffers should match */
@@ -479,6 +479,7 @@ void acpi_db_dump_pld_buffer(union acpi_object *obj_desc)
                               pld_info->horizontal_offset);
        }
 
-       ACPI_FREE(pld_info);
        ACPI_FREE(new_buffer);
+exit:
+       ACPI_FREE(pld_info);
 }
index 6a72047aae1c4c0c1db21db5739172011bf4f217..1982310e6d83a3aa6a6e510e8f72682fb1c05226 100644
--- a/drivers/acpi/acpica/dsmethod.c
+++ b/drivers/acpi/acpica/dsmethod.c
@@ -809,8 +809,7 @@ acpi_ds_terminate_control_method(union acpi_operand_object *method_desc,
                if (method_desc->method.
                    info_flags & ACPI_METHOD_SERIALIZED_PENDING) {
                        if (walk_state) {
-                               ACPI_INFO((AE_INFO,
-                                          "Marking method %4.4s as Serialized "
+                               ACPI_INFO(("Marking method %4.4s as Serialized "
                                           "because of AE_ALREADY_EXISTS error",
                                           walk_state->method_node->name.
                                           ascii));
index c303e9d9266f6e5ec4afed89dc825f0c2fac449f..a91de2b4603c78cfe7a627c092e3e37b922bbdd8 100644
--- a/drivers/acpi/acpica/dsobject.c
+++ b/drivers/acpi/acpica/dsobject.c
@@ -524,8 +524,7 @@ acpi_ds_build_internal_package_obj(struct acpi_walk_state *walk_state,
                        arg = arg->common.next;
                }
 
-               ACPI_INFO((AE_INFO,
-                          "Actual Package length (%u) is larger than "
+               ACPI_INFO(("Actual Package length (%u) is larger than "
                           "NumElements field (%u), truncated",
                           i, element_count));
        } else if (i < element_count) {
index 9275e626ed8d8d04e4c920ff494e35ac3c236e73..447fa1cac64fdb3c3bdde2f3199e0ae326a8d953 100644
--- a/drivers/acpi/acpica/evgpeblk.c
+++ b/drivers/acpi/acpica/evgpeblk.c
@@ -499,8 +499,7 @@ acpi_ev_initialize_gpe_block(struct acpi_gpe_xrupt_info *gpe_xrupt_info,
        }
 
        if (gpe_enabled_count) {
-               ACPI_INFO((AE_INFO,
-                          "Enabled %u GPEs in block %02X to %02X",
+               ACPI_INFO(("Enabled %u GPEs in block %02X to %02X",
                           gpe_enabled_count, (u32)gpe_block->block_base_number,
                           (u32)(gpe_block->block_base_number +
                                 (gpe_block->gpe_count - 1))));
index 9fdd8d09141b89e52d059cc6d9bad027fa9fe89d..7dc75474c8979ceb974e50d2eb66898bb87c3293 100644
--- a/drivers/acpi/acpica/evgpeinit.c
+++ b/drivers/acpi/acpica/evgpeinit.c
@@ -281,7 +281,7 @@ void acpi_ev_update_gpes(acpi_owner_id table_owner_id)
        }
 
        if (walk_info.count) {
-               ACPI_INFO((AE_INFO, "Enabled %u new GPEs", walk_info.count));
+               ACPI_INFO(("Enabled %u new GPEs", walk_info.count));
        }
 
        (void)acpi_ut_release_mutex(ACPI_MTX_EVENTS);
index 47092b4d633cca85f68e16962945bb2fe2164452..63924d1c737a0b303f999fa4de9f08cf5e12a89b 100644
--- a/drivers/acpi/acpica/evregion.c
+++ b/drivers/acpi/acpica/evregion.c
@@ -600,7 +600,7 @@ acpi_ev_execute_reg_method(union acpi_operand_object *region_obj, u32 function)
 
        if (region_obj2->extra.method_REG == NULL ||
            region_obj->region.handler == NULL ||
-           !acpi_gbl_reg_methods_enabled) {
+           !acpi_gbl_namespace_initialized) {
                return_ACPI_STATUS(AE_OK);
        }
 
index 011df210b7b2339323866f71737680b7e1ed8ead..f7416130103798549e0b95f0f80069e381c30195 100644
--- a/drivers/acpi/acpica/exconfig.c
+++ b/drivers/acpi/acpica/exconfig.c
@@ -252,7 +252,7 @@ acpi_ex_load_table_op(struct acpi_walk_state *walk_state,
 
        status = acpi_get_table_by_index(table_index, &table);
        if (ACPI_SUCCESS(status)) {
-               ACPI_INFO((AE_INFO, "Dynamic OEM Table Load:"));
+               ACPI_INFO(("Dynamic OEM Table Load:"));
                acpi_tb_print_table_header(0, table);
        }
 
@@ -472,7 +472,7 @@ acpi_ex_load_op(union acpi_operand_object *obj_desc,
 
        /* Install the new table into the local data structures */
 
-       ACPI_INFO((AE_INFO, "Dynamic OEM Table Load:"));
+       ACPI_INFO(("Dynamic OEM Table Load:"));
        (void)acpi_ut_acquire_mutex(ACPI_MTX_TABLES);
 
        status = acpi_tb_install_standard_table(ACPI_PTR_TO_PHYSADDR(table),
index 28eb861c44ebeaab2715ee19cae7c9e202daea00..5aa21c4eda1d5b4a8f1101d00c51a50b95353060 100644
--- a/drivers/acpi/acpica/exoparg3.c
+++ b/drivers/acpi/acpica/exoparg3.c
@@ -123,8 +123,10 @@ acpi_status acpi_ex_opcode_3A_0T_0R(struct acpi_walk_state *walk_state)
                 * op is intended for use by disassemblers in order to properly
                 * disassemble control method invocations. The opcode or group of
                 * opcodes should be surrounded by an "if (0)" clause to ensure that
-                * AML interpreters never see the opcode.
+                * AML interpreters never see the opcode. Thus, something is
+                * wrong if an external opcode ever gets here.
                 */
+               ACPI_ERROR((AE_INFO, "Executed External Op"));
                status = AE_OK;
                goto cleanup;
 
index 65d58bea43207a80b61805bb51cd9bb1744c5bfd..5d59cfcef6f4eb8a5e8fa0a1b1fdaee40fff1dba 100644 (file)
@@ -378,8 +378,7 @@ void acpi_ns_exec_module_code_list(void)
                acpi_ut_remove_reference(prev);
        }
 
-       ACPI_INFO((AE_INFO,
-                  "Executed %u blocks of module-level executable AML code",
+       ACPI_INFO(("Executed %u blocks of module-level executable AML code",
                   method_count));
 
        ACPI_FREE(info);
index bd75d46234a4209c62bab768edcf3566fc7ed011..d4aa8b696ee9d8e970c904e62ef95907841f1b4c 100644 (file)
@@ -46,6 +46,7 @@
 #include "acnamesp.h"
 #include "acdispat.h"
 #include "acinterp.h"
+#include "acevents.h"
 
 #define _COMPONENT          ACPI_NAMESPACE
 ACPI_MODULE_NAME("nsinit")
@@ -83,6 +84,8 @@ acpi_status acpi_ns_initialize_objects(void)
 
        ACPI_FUNCTION_TRACE(ns_initialize_objects);
 
+       ACPI_DEBUG_PRINT((ACPI_DB_EXEC,
+                         "[Init] Completing Initialization of ACPI Objects\n"));
        ACPI_DEBUG_PRINT((ACPI_DB_DISPATCH,
                          "**** Starting initialization of namespace objects ****\n"));
        ACPI_DEBUG_PRINT_RAW((ACPI_DB_INIT,
@@ -133,82 +136,108 @@ acpi_status acpi_ns_initialize_objects(void)
  *
  ******************************************************************************/
 
-acpi_status acpi_ns_initialize_devices(void)
+acpi_status acpi_ns_initialize_devices(u32 flags)
 {
-       acpi_status status;
+       acpi_status status = AE_OK;
        struct acpi_device_walk_info info;
 
        ACPI_FUNCTION_TRACE(ns_initialize_devices);
 
-       /* Init counters */
+       if (!(flags & ACPI_NO_DEVICE_INIT)) {
+               ACPI_DEBUG_PRINT((ACPI_DB_EXEC,
+                                 "[Init] Initializing ACPI Devices\n"));
 
-       info.device_count = 0;
-       info.num_STA = 0;
-       info.num_INI = 0;
+               /* Init counters */
 
-       ACPI_DEBUG_PRINT_RAW((ACPI_DB_INIT,
-                             "Initializing Device/Processor/Thermal objects "
-                             "and executing _INI/_STA methods:\n"));
+               info.device_count = 0;
+               info.num_STA = 0;
+               info.num_INI = 0;
 
-       /* Tree analysis: find all subtrees that contain _INI methods */
+               ACPI_DEBUG_PRINT_RAW((ACPI_DB_INIT,
+                                     "Initializing Device/Processor/Thermal objects "
+                                     "and executing _INI/_STA methods:\n"));
 
-       status = acpi_ns_walk_namespace(ACPI_TYPE_ANY, ACPI_ROOT_OBJECT,
-                                       ACPI_UINT32_MAX, FALSE,
-                                       acpi_ns_find_ini_methods, NULL, &info,
-                                       NULL);
-       if (ACPI_FAILURE(status)) {
-               goto error_exit;
-       }
+               /* Tree analysis: find all subtrees that contain _INI methods */
+
+               status = acpi_ns_walk_namespace(ACPI_TYPE_ANY, ACPI_ROOT_OBJECT,
+                                               ACPI_UINT32_MAX, FALSE,
+                                               acpi_ns_find_ini_methods, NULL,
+                                               &info, NULL);
+               if (ACPI_FAILURE(status)) {
+                       goto error_exit;
+               }
+
+               /* Allocate the evaluation information block */
 
-       /* Allocate the evaluation information block */
+               info.evaluate_info =
+                   ACPI_ALLOCATE_ZEROED(sizeof(struct acpi_evaluate_info));
+               if (!info.evaluate_info) {
+                       status = AE_NO_MEMORY;
+                       goto error_exit;
+               }
 
-       info.evaluate_info =
-           ACPI_ALLOCATE_ZEROED(sizeof(struct acpi_evaluate_info));
-       if (!info.evaluate_info) {
-               status = AE_NO_MEMORY;
-               goto error_exit;
+               /*
+                * Execute the "global" _INI method that may appear at the root.
+                * This support is provided for Windows compatibility (Vista+) and
+                * is not part of the ACPI specification.
+                */
+               info.evaluate_info->prefix_node = acpi_gbl_root_node;
+               info.evaluate_info->relative_pathname = METHOD_NAME__INI;
+               info.evaluate_info->parameters = NULL;
+               info.evaluate_info->flags = ACPI_IGNORE_RETURN_VALUE;
+
+               status = acpi_ns_evaluate(info.evaluate_info);
+               if (ACPI_SUCCESS(status)) {
+                       info.num_INI++;
+               }
        }
 
        /*
-        * Execute the "global" _INI method that may appear at the root. This
-        * support is provided for Windows compatibility (Vista+) and is not
-        * part of the ACPI specification.
+        * Run all _REG methods
+        *
+        * Note: Any objects accessed by the _REG methods will be automatically
+        * initialized, even if they contain executable AML (see the call to
+        * acpi_ns_initialize_objects below).
         */
-       info.evaluate_info->prefix_node = acpi_gbl_root_node;
-       info.evaluate_info->relative_pathname = METHOD_NAME__INI;
-       info.evaluate_info->parameters = NULL;
-       info.evaluate_info->flags = ACPI_IGNORE_RETURN_VALUE;
+       if (!(flags & ACPI_NO_ADDRESS_SPACE_INIT)) {
+               ACPI_DEBUG_PRINT((ACPI_DB_EXEC,
+                                 "[Init] Executing _REG OpRegion methods\n"));
 
-       status = acpi_ns_evaluate(info.evaluate_info);
-       if (ACPI_SUCCESS(status)) {
-               info.num_INI++;
+               status = acpi_ev_initialize_op_regions();
+               if (ACPI_FAILURE(status)) {
+                       goto error_exit;
+               }
        }
 
-       /* Walk namespace to execute all _INIs on present devices */
+       if (!(flags & ACPI_NO_DEVICE_INIT)) {
 
-       status = acpi_ns_walk_namespace(ACPI_TYPE_ANY, ACPI_ROOT_OBJECT,
-                                       ACPI_UINT32_MAX, FALSE,
-                                       acpi_ns_init_one_device, NULL, &info,
-                                       NULL);
+               /* Walk namespace to execute all _INIs on present devices */
 
-       /*
-        * Any _OSI requests should be completed by now. If the BIOS has
-        * requested any Windows OSI strings, we will always truncate
-        * I/O addresses to 16 bits -- for Windows compatibility.
-        */
-       if (acpi_gbl_osi_data >= ACPI_OSI_WIN_2000) {
-               acpi_gbl_truncate_io_addresses = TRUE;
-       }
+               status = acpi_ns_walk_namespace(ACPI_TYPE_ANY, ACPI_ROOT_OBJECT,
+                                               ACPI_UINT32_MAX, FALSE,
+                                               acpi_ns_init_one_device, NULL,
+                                               &info, NULL);
 
-       ACPI_FREE(info.evaluate_info);
-       if (ACPI_FAILURE(status)) {
-               goto error_exit;
-       }
+               /*
+                * Any _OSI requests should be completed by now. If the BIOS has
+                * requested any Windows OSI strings, we will always truncate
+                * I/O addresses to 16 bits -- for Windows compatibility.
+                */
+               if (acpi_gbl_osi_data >= ACPI_OSI_WIN_2000) {
+                       acpi_gbl_truncate_io_addresses = TRUE;
+               }
 
-       ACPI_DEBUG_PRINT_RAW((ACPI_DB_INIT,
-                             "    Executed %u _INI methods requiring %u _STA executions "
-                             "(examined %u objects)\n",
-                             info.num_INI, info.num_STA, info.device_count));
+               ACPI_FREE(info.evaluate_info);
+               if (ACPI_FAILURE(status)) {
+                       goto error_exit;
+               }
+
+               ACPI_DEBUG_PRINT_RAW((ACPI_DB_INIT,
+                                     "    Executed %u _INI methods requiring %u _STA executions "
+                                     "(examined %u objects)\n",
+                                     info.num_INI, info.num_STA,
+                                     info.device_count));
+       }
 
        return_ACPI_STATUS(status);
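The reworked acpi_ns_initialize_devices() above gates the _STA/_INI device walk and the _REG address-space pass independently on the same flags word. A minimal standalone sketch of that gating pattern, not ACPICA code: the flag names mirror the ACPICA ones, and the two phase functions are hypothetical stand-ins.

#include <stdio.h>

#define ACPI_NO_DEVICE_INIT        0x01
#define ACPI_NO_ADDRESS_SPACE_INIT 0x02

/* Hypothetical stand-ins for the _STA/_INI walk and the _REG execution. */
static int run_device_init(void)
{
        puts("walk namespace, evaluate _STA/_INI");
        return 0;
}

static int run_address_space_init(void)
{
        puts("execute _REG for operation regions");
        return 0;
}

static int initialize_devices(unsigned int flags)
{
        int status = 0;

        if (!(flags & ACPI_NO_DEVICE_INIT))
                status = run_device_init();
        if (status)
                return status;

        if (!(flags & ACPI_NO_ADDRESS_SPACE_INIT))
                status = run_address_space_init();

        return status;
}

int main(void)
{
        /* Run only the device phase, as a host deferring _REG might. */
        return initialize_devices(ACPI_NO_ADDRESS_SPACE_INIT);
}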
 
index 305218539df28cda17b97992ff44a0dbaeb613de..d48cbed342c1dda500d5020c746a9caf1c72ad09 100644 (file)
@@ -269,8 +269,7 @@ acpi_ps_get_next_namepath(struct acpi_walk_state *walk_state,
         */
        if (ACPI_SUCCESS(status) &&
            possible_method_call && (node->type == ACPI_TYPE_METHOD)) {
-               if (GET_CURRENT_ARG_TYPE(walk_state->arg_types) ==
-                   ARGP_SUPERNAME) {
+               if (walk_state->opcode == AML_UNLOAD_OP) {
                        /*
                         * acpi_ps_get_next_namestring has increased the AML pointer,
                         * so we need to restore the saved AML pointer for method call.
@@ -697,7 +696,7 @@ static union acpi_parse_object *acpi_ps_get_next_field(struct acpi_parse_state
  *
  * PARAMETERS:  walk_state          - Current state
  *              parser_state        - Current parser state object
- *              arg_type            - The parser argument type (ARGP_*)
+ *              arg_type            - The argument type (AML_*_ARG)
  *              return_arg          - Where the next arg is returned
  *
  * RETURN:      Status, and an op object containing the next argument.
@@ -817,9 +816,9 @@ acpi_ps_get_next_arg(struct acpi_walk_state *walk_state,
                                return_ACPI_STATUS(AE_NO_MEMORY);
                        }
 
-                       /* super_name allows argument to be a method call */
+                       /* To support super_name arg of Unload */
 
-                       if (arg_type == ARGP_SUPERNAME) {
+                       if (walk_state->opcode == AML_UNLOAD_OP) {
                                status =
                                    acpi_ps_get_next_namepath(walk_state,
                                                              parser_state, arg,
index b661a1e013fbed639da4aab35002670c5fdc2f4e..4dc6108de4ffbf83a37ea714e0342cfcd8847d6c 100644 (file)
@@ -267,8 +267,7 @@ acpi_tb_install_standard_table(acpi_physical_address address,
        if (!reload &&
            acpi_gbl_disable_ssdt_table_install &&
            ACPI_COMPARE_NAME(&new_table_desc.signature, ACPI_SIG_SSDT)) {
-               ACPI_INFO((AE_INFO,
-                          "Ignoring installation of %4.4s at %8.8X%8.8X",
+               ACPI_INFO(("Ignoring installation of %4.4s at %8.8X%8.8X",
                           new_table_desc.signature.ascii,
                           ACPI_FORMAT_UINT64(address)));
                goto release_and_exit;
@@ -432,7 +431,7 @@ finish_override:
                return;
        }
 
-       ACPI_INFO((AE_INFO, "%4.4s 0x%8.8X%8.8X"
+       ACPI_INFO(("%4.4s 0x%8.8X%8.8X"
                   " %s table override, new table: 0x%8.8X%8.8X",
                   old_table_desc->signature.ascii,
                   ACPI_FORMAT_UINT64(old_table_desc->address),
index fd4146d4ff4902660996add0e78ca9582d07e5b3..26d61dbace0a1fa67b8706cc1ee0d124ac3d7e01 100644 (file)
@@ -132,7 +132,7 @@ acpi_tb_print_table_header(acpi_physical_address address,
 
                /* FACS only has signature and length fields */
 
-               ACPI_INFO((AE_INFO, "%-4.4s 0x%8.8X%8.8X %06X",
+               ACPI_INFO(("%-4.4s 0x%8.8X%8.8X %06X",
                           header->signature, ACPI_FORMAT_UINT64(address),
                           header->length));
        } else if (ACPI_VALIDATE_RSDP_SIG(header->signature)) {
@@ -144,7 +144,7 @@ acpi_tb_print_table_header(acpi_physical_address address,
                       ACPI_OEM_ID_SIZE);
                acpi_tb_fix_string(local_header.oem_id, ACPI_OEM_ID_SIZE);
 
-               ACPI_INFO((AE_INFO, "RSDP 0x%8.8X%8.8X %06X (v%.2d %-6.6s)",
+               ACPI_INFO(("RSDP 0x%8.8X%8.8X %06X (v%.2d %-6.6s)",
                           ACPI_FORMAT_UINT64(address),
                           (ACPI_CAST_PTR(struct acpi_table_rsdp, header)->
                            revision >
@@ -158,8 +158,7 @@ acpi_tb_print_table_header(acpi_physical_address address,
 
                acpi_tb_cleanup_table_header(&local_header, header);
 
-               ACPI_INFO((AE_INFO,
-                          "%-4.4s 0x%8.8X%8.8X"
+               ACPI_INFO(("%-4.4s 0x%8.8X%8.8X"
                           " %06X (v%.2d %-6.6s %-8.8s %08X %-4.4s %08X)",
                           local_header.signature, ACPI_FORMAT_UINT64(address),
                           local_header.length, local_header.revision,
index 3269bef371d7ddd21a32ac18f58d404cefdeb44b..9240c76d2823e333d2b30ca2d24ad32bfae3c5cf 100644 (file)
@@ -174,9 +174,7 @@ struct acpi_table_header *acpi_tb_copy_dsdt(u32 table_index)
                                      ACPI_TABLE_ORIGIN_INTERNAL_VIRTUAL,
                                      new_table);
 
-       ACPI_INFO((AE_INFO,
-                  "Forced DSDT copy: length 0x%05X copied locally, original unmapped",
-                  new_table->length));
+       ACPI_INFO(("Forced DSDT copy: length 0x%05X copied locally, original unmapped", new_table->length));
 
        return (new_table);
 }
index 278666e39563e7fc3cacdc4ae6e36a5a7bdc3c93..3151968c10d1a0aef3c2a1610307568b8e171d4a 100644 (file)
@@ -47,6 +47,7 @@
 #include "accommon.h"
 #include "acnamesp.h"
 #include "actables.h"
+#include "acevents.h"
 
 #define _COMPONENT          ACPI_TABLES
 ACPI_MODULE_NAME("tbxfload")
@@ -68,6 +69,25 @@ acpi_status __init acpi_load_tables(void)
 
        ACPI_FUNCTION_TRACE(acpi_load_tables);
 
+       /*
+        * Install the default operation region handlers. These are the
+        * handlers that are defined by the ACPI specification to be
+        * "always accessible" -- namely, system_memory, system_IO, and
+        * PCI_Config. This also means that no _REG methods need to be
+        * run for these address spaces. We need to have these handlers
+        * installed before any AML code can be executed, especially any
+        * module-level code (11/2015).
+        * Note that we allow OSPMs to install their own region handlers
+        * between acpi_initialize_subsystem() and acpi_load_tables() to use
+        * their customized default region handlers.
+        */
+       status = acpi_ev_install_region_handlers();
+       if (ACPI_FAILURE(status) && status != AE_ALREADY_EXISTS) {
+               ACPI_EXCEPTION((AE_INFO, status,
+                               "During Region initialization"));
+               return_ACPI_STATUS(status);
+       }
+
        /* Load the namespace from the tables */
 
        status = acpi_tb_load_namespace();
@@ -83,6 +103,20 @@ acpi_status __init acpi_load_tables(void)
                                "While loading namespace from ACPI tables"));
        }
 
+       if (!acpi_gbl_group_module_level_code) {
+               /*
+                * Initialize the objects that remain uninitialized. This
+                * runs the executable AML that may be part of the
+                * declaration of these objects:
+                * operation_regions, buffer_fields, Buffers, and Packages.
+                */
+               status = acpi_ns_initialize_objects();
+               if (ACPI_FAILURE(status)) {
+                       return_ACPI_STATUS(status);
+               }
+       }
+
+       acpi_gbl_namespace_initialized = TRUE;
        return_ACPI_STATUS(status);
 }
 
@@ -206,9 +240,7 @@ acpi_status acpi_tb_load_namespace(void)
        }
 
        if (!tables_failed) {
-               ACPI_INFO((AE_INFO,
-                          "%u ACPI AML tables successfully acquired and loaded\n",
-                          tables_loaded));
+               ACPI_INFO(("%u ACPI AML tables successfully acquired and loaded\n", tables_loaded));
        } else {
                ACPI_ERROR((AE_INFO,
                            "%u table load failures, %u successful",
@@ -301,7 +333,7 @@ acpi_status acpi_load_table(struct acpi_table_header *table)
 
        /* Install the table and load it into the namespace */
 
-       ACPI_INFO((AE_INFO, "Host-directed Dynamic ACPI Table Load:"));
+       ACPI_INFO(("Host-directed Dynamic ACPI Table Load:"));
        (void)acpi_ut_acquire_mutex(ACPI_MTX_TABLES);
 
        status = acpi_tb_install_standard_table(ACPI_PTR_TO_PHYSADDR(table),
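acpi_load_tables() now installs the default operation region handlers itself and tolerates AE_ALREADY_EXISTS, so handlers the host registered earlier survive. A small sketch of that idempotent-install pattern under the same assumption; the status codes and helpers below are illustrative, not ACPICA API.

#include <stdio.h>

enum st { ST_OK = 0, ST_ALREADY_EXISTS, ST_ERROR };

/* Hypothetical installer: reports ST_ALREADY_EXISTS if a handler is present. */
static enum st install_default_handler(const char *space, int *installed)
{
        if (*installed)
                return ST_ALREADY_EXISTS;

        *installed = 1;
        printf("installed default handler for %s\n", space);
        return ST_OK;
}

static enum st load_tables(int *sysmem_handler_installed)
{
        enum st s = install_default_handler("SystemMemory",
                                            sysmem_handler_installed);

        /* A pre-existing (possibly custom) handler is not an error here. */
        if (s != ST_OK && s != ST_ALREADY_EXISTS)
                return s;

        puts("loading namespace from tables");
        return ST_OK;
}

int main(void)
{
        int custom_installed = 1;       /* host registered its own handler earlier */

        return load_tables(&custom_installed) == ST_OK ? 0 : 1;
}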
index c9a720f2274a59053533f9ac143dbd350398d400..f8e9978888e162e7f035a69b339741ca515ef2a3 100644 (file)
@@ -245,7 +245,7 @@ void *acpi_os_acquire_object(struct acpi_memory_list *cache)
        acpi_status status;
        void *object;
 
-       ACPI_FUNCTION_NAME(os_acquire_object);
+       ACPI_FUNCTION_TRACE(os_acquire_object);
 
        if (!cache) {
                return_PTR(NULL);
index c427a5cda465e2722b93c4181791063d17bc952d..d5c3adf19bd0595ea6f1e1ea356d7d081986246f 100644 (file)
@@ -140,6 +140,67 @@ int acpi_ut_stricmp(char *string1, char *string2)
        return (c1 - c2);
 }
 
+#if defined (ACPI_DEBUGGER) || defined (ACPI_APPLICATION)
+/*******************************************************************************
+ *
+ * FUNCTION:    acpi_ut_safe_strcpy, acpi_ut_safe_strcat, acpi_ut_safe_strncat
+ *
+ * PARAMETERS:  Adds a "DestSize" parameter to each of the standard string
+ *              functions. This is the size of the Destination buffer.
+ *
+ * RETURN:      TRUE if the operation would overflow the destination buffer.
+ *
+ * DESCRIPTION: Safe versions of standard Clib string functions. Ensure that
+ *              the result of the operation will not overflow the output string
+ *              buffer.
+ *
+ * NOTE:        These functions are typically only helpful for processing
+ *              user input and command lines. For most ACPICA code, the
+ *              required buffer length is precisely calculated before buffer
+ *              allocation, so the use of these functions is unnecessary.
+ *
+ ******************************************************************************/
+
+u8 acpi_ut_safe_strcpy(char *dest, acpi_size dest_size, char *source)
+{
+
+       if (strlen(source) >= dest_size) {
+               return (TRUE);
+       }
+
+       strcpy(dest, source);
+       return (FALSE);
+}
+
+u8 acpi_ut_safe_strcat(char *dest, acpi_size dest_size, char *source)
+{
+
+       if ((strlen(dest) + strlen(source)) >= dest_size) {
+               return (TRUE);
+       }
+
+       strcat(dest, source);
+       return (FALSE);
+}
+
+u8
+acpi_ut_safe_strncat(char *dest,
+                    acpi_size dest_size,
+                    char *source, acpi_size max_transfer_length)
+{
+       acpi_size actual_transfer_length;
+
+       actual_transfer_length = ACPI_MIN(max_transfer_length, strlen(source));
+
+       if ((strlen(dest) + actual_transfer_length) >= dest_size) {
+               return (TRUE);
+       }
+
+       strncat(dest, source, max_transfer_length);
+       return (FALSE);
+}
+#endif
+
 /*******************************************************************************
  *
  * FUNCTION:    acpi_ut_strtoul64
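The overflow-checked helpers added in the hunk above return TRUE when the operation would not fit, so callers test the return value rather than trusting the destination size. A brief usage sketch; the re-implementation below only mirrors the behavior the comments describe.

#include <stdio.h>
#include <string.h>

/* Mirrors acpi_ut_safe_strcat(): returns 1 (TRUE) when the result would not fit. */
static int safe_strcat(char *dest, size_t dest_size, const char *source)
{
        if ((strlen(dest) + strlen(source)) >= dest_size)
                return 1;

        strcat(dest, source);
        return 0;
}

int main(void)
{
        char cmdline[16] = "load ";

        if (safe_strcat(cmdline, sizeof(cmdline), "my_very_long_table_name.aml"))
                fprintf(stderr, "argument too long, not appended\n");
        else
                printf("%s\n", cmdline);

        return 0;
}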
@@ -155,7 +216,15 @@ int acpi_ut_stricmp(char *string1, char *string2)
  *              32-bit or 64-bit conversion, depending on the current mode
  *              of the interpreter.
  *
- * NOTE:        Does not support Octal strings, not needed.
+ * NOTES:       acpi_gbl_integer_byte_width should be set to the proper width.
+ *              For the core ACPICA code, this width depends on the DSDT
+ *              version. For iASL, the default byte width is always 8.
+ *
+ *              Does not support Octal strings, not needed at this time.
+ *
+ *              There is an earlier version of the function after this one,
+ *              below. It is slightly different from this one, and the two
+ *              may eventually need to be merged. (01/2016).
  *
  ******************************************************************************/
 
@@ -171,7 +240,7 @@ acpi_status acpi_ut_strtoul64(char *string, u32 base, u64 *ret_integer)
        u8 sign_of0x = 0;
        u8 term = 0;
 
-       ACPI_FUNCTION_TRACE_STR(ut_stroul64, string);
+       ACPI_FUNCTION_TRACE_STR(ut_strtoul64, string);
 
        switch (base) {
        case ACPI_ANY_BASE:
@@ -318,63 +387,162 @@ error_exit:
        }
 }
 
-#if defined (ACPI_DEBUGGER) || defined (ACPI_APPLICATION)
+#ifdef _OBSOLETE_FUNCTIONS
+/* TBD: use version in ACPICA main code base? */
+/* DONE: 01/2016 */
+
 /*******************************************************************************
  *
- * FUNCTION:    acpi_ut_safe_strcpy, acpi_ut_safe_strcat, acpi_ut_safe_strncat
+ * FUNCTION:    strtoul64
  *
- * PARAMETERS:  Adds a "DestSize" parameter to each of the standard string
- *              functions. This is the size of the Destination buffer.
+ * PARAMETERS:  string              - Null terminated string
+ *              terminator          - Where a pointer to the terminating byte
+ *                                    is returned
+ *              base                - Radix of the string
  *
- * RETURN:      TRUE if the operation would overflow the destination buffer.
+ * RETURN:      Converted value
  *
- * DESCRIPTION: Safe versions of standard Clib string functions. Ensure that
- *              the result of the operation will not overflow the output string
- *              buffer.
- *
- * NOTE:        These functions are typically only helpful for processing
- *              user input and command lines. For most ACPICA code, the
- *              required buffer length is precisely calculated before buffer
- *              allocation, so the use of these functions is unnecessary.
+ * DESCRIPTION: Convert a string into an unsigned value.
  *
  ******************************************************************************/
 
-u8 acpi_ut_safe_strcpy(char *dest, acpi_size dest_size, char *source)
+acpi_status strtoul64(char *string, u32 base, u64 *ret_integer)
 {
+       u32 index;
+       u32 sign;
+       u64 return_value = 0;
+       acpi_status status = AE_OK;
 
-       if (strlen(source) >= dest_size) {
-               return (TRUE);
+       *ret_integer = 0;
+
+       switch (base) {
+       case 0:
+       case 8:
+       case 10:
+       case 16:
+
+               break;
+
+       default:
+               /*
+                * The specified Base parameter is not in the domain of
+                * this function:
+                */
+               return (AE_BAD_PARAMETER);
        }
 
-       strcpy(dest, source);
-       return (FALSE);
-}
+       /* Skip over any white space in the buffer: */
 
-u8 acpi_ut_safe_strcat(char *dest, acpi_size dest_size, char *source)
-{
+       while (isspace((int)*string) || *string == '\t') {
+               ++string;
+       }
 
-       if ((strlen(dest) + strlen(source)) >= dest_size) {
-               return (TRUE);
+       /*
+        * The buffer may contain an optional plus or minus sign.
+        * If it does, then skip over it but remember what it was:
+        */
+       if (*string == '-') {
+               sign = ACPI_SIGN_NEGATIVE;
+               ++string;
+       } else if (*string == '+') {
+               ++string;
+               sign = ACPI_SIGN_POSITIVE;
+       } else {
+               sign = ACPI_SIGN_POSITIVE;
        }
 
-       strcat(dest, source);
-       return (FALSE);
-}
+       /*
+        * If the input parameter Base is zero, then we need to
+        * determine if it is octal, decimal, or hexadecimal:
+        */
+       if (base == 0) {
+               if (*string == '0') {
+                       if (tolower((int)*(++string)) == 'x') {
+                               base = 16;
+                               ++string;
+                       } else {
+                               base = 8;
+                       }
+               } else {
+                       base = 10;
+               }
+       }
 
-u8
-acpi_ut_safe_strncat(char *dest,
-                    acpi_size dest_size,
-                    char *source, acpi_size max_transfer_length)
-{
-       acpi_size actual_transfer_length;
+       /*
+        * For octal and hexadecimal bases, skip over the leading
+        * 0 or 0x, if they are present.
+        */
+       if (base == 8 && *string == '0') {
+               string++;
+       }
 
-       actual_transfer_length = ACPI_MIN(max_transfer_length, strlen(source));
+       if (base == 16 && *string == '0' && tolower((int)*(++string)) == 'x') {
+               string++;
+       }
 
-       if ((strlen(dest) + actual_transfer_length) >= dest_size) {
-               return (TRUE);
+       /* Main loop: convert the string to an unsigned long */
+
+       while (*string) {
+               if (isdigit((int)*string)) {
+                       index = ((u8)*string) - '0';
+               } else {
+                       index = (u8)toupper((int)*string);
+                       if (isupper((int)index)) {
+                               index = index - 'A' + 10;
+                       } else {
+                               goto error_exit;
+                       }
+               }
+
+               if (index >= base) {
+                       goto error_exit;
+               }
+
+               /* Check to see if value is out of range: */
+
+               if (return_value > ((ACPI_UINT64_MAX - (u64)index) / (u64)base)) {
+                       goto error_exit;
+               } else {
+                       return_value *= base;
+                       return_value += index;
+               }
+
+               ++string;
        }
 
-       strncat(dest, source, max_transfer_length);
-       return (FALSE);
+       /* If a minus sign was present, then "the conversion is negated": */
+
+       if (sign == ACPI_SIGN_NEGATIVE) {
+               return_value = (ACPI_UINT32_MAX - return_value) + 1;
+       }
+
+       *ret_integer = return_value;
+       return (status);
+
+error_exit:
+       switch (base) {
+       case 8:
+
+               status = AE_BAD_OCTAL_CONSTANT;
+               break;
+
+       case 10:
+
+               status = AE_BAD_DECIMAL_CONSTANT;
+               break;
+
+       case 16:
+
+               status = AE_BAD_HEX_CONSTANT;
+               break;
+
+       default:
+
+               /* Base validated above */
+
+               break;
+       }
+
+       return (status);
 }
 #endif
index c7c2bb8f3559cc71c07fbb8933b13a53d6874ac2..60c406a8efcb3b533b6d2fd1f1f5c9f75f13831e 100644 (file)
@@ -712,7 +712,7 @@ void acpi_ut_dump_allocations(u32 component, const char *module)
        /* Print summary */
 
        if (!num_outstanding) {
-               ACPI_INFO((AE_INFO, "No outstanding allocations"));
+               ACPI_INFO(("No outstanding allocations"));
        } else {
                ACPI_ERROR((AE_INFO, "%u(0x%X) Outstanding allocations",
                            num_outstanding, num_outstanding));
index 6fe59597b5999dc340cd25590e8c6b00a0cc14b6..d9f15cbcd8a0b9228382042abdd802b4322d11ba 100644 (file)
@@ -175,8 +175,7 @@ ACPI_EXPORT_SYMBOL(acpi_warning)
  * TBD: module_name and line_number args are not needed, should be removed.
  *
  ******************************************************************************/
-void ACPI_INTERNAL_VAR_XFACE
-acpi_info(const char *module_name, u32 line_number, const char *format, ...)
+void ACPI_INTERNAL_VAR_XFACE acpi_info(const char *format, ...)
 {
        va_list arg_list;
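This signature change is why the many ACPI_INFO() call sites in this series drop the leading AE_INFO argument: acpi_info() now takes only a format string and its arguments. A compact illustration of the before/after calling convention, with the macro plumbing reduced to plain variadic C; names suffixed _DEMO are made up here.

#include <stdarg.h>
#include <stdio.h>

/* Simplified stand-in for the new acpi_info(): format-only, no module/line. */
static void acpi_info_demo(const char *format, ...)
{
        va_list args;

        va_start(args, format);
        printf("ACPI: ");
        vprintf(format, args);
        printf("\n");
        va_end(args);
}

/* The double parentheses let one macro forward a variable argument list. */
#define ACPI_INFO_DEMO(plist)   acpi_info_demo plist

int main(void)
{
        /* Old call sites passed a location first: ACPI_INFO((AE_INFO, "...")); */
        /* New call sites pass only the message and its arguments:              */
        ACPI_INFO_DEMO(("Enabled %u new GPEs", 4u));
        return 0;
}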
 
index 721b87cce90865c8c3d20295b664cf2665de84b6..75b5f27da2674b4e34d84cd65aadf0a42ee76819 100644 (file)
@@ -154,21 +154,6 @@ acpi_status __init acpi_enable_subsystem(u32 flags)
         */
        acpi_gbl_early_initialization = FALSE;
 
-       /*
-        * Install the default operation region handlers. These are the
-        * handlers that are defined by the ACPI specification to be
-        * "always accessible" -- namely, system_memory, system_IO, and
-        * PCI_Config. This also means that no _REG methods need to be
-        * run for these address spaces. We need to have these handlers
-        * installed before any AML code can be executed, especially any
-        * module-level code (11/2015).
-        */
-       status = acpi_ev_install_region_handlers();
-       if (ACPI_FAILURE(status)) {
-               ACPI_EXCEPTION((AE_INFO, status,
-                               "During Region initialization"));
-               return_ACPI_STATUS(status);
-       }
 #if (!ACPI_REDUCED_HARDWARE)
 
        /* Enable ACPI mode */
@@ -260,23 +245,6 @@ acpi_status __init acpi_initialize_objects(u32 flags)
 
        ACPI_FUNCTION_TRACE(acpi_initialize_objects);
 
-       /*
-        * Run all _REG methods
-        *
-        * Note: Any objects accessed by the _REG methods will be automatically
-        * initialized, even if they contain executable AML (see the call to
-        * acpi_ns_initialize_objects below).
-        */
-       acpi_gbl_reg_methods_enabled = TRUE;
-       if (!(flags & ACPI_NO_ADDRESS_SPACE_INIT)) {
-               ACPI_DEBUG_PRINT((ACPI_DB_EXEC,
-                                 "[Init] Executing _REG OpRegion methods\n"));
-
-               status = acpi_ev_initialize_op_regions();
-               if (ACPI_FAILURE(status)) {
-                       return_ACPI_STATUS(status);
-               }
-       }
 #ifdef ACPI_EXEC_APP
        /*
         * This call implements the "initialization file" option for acpi_exec.
@@ -299,32 +267,27 @@ acpi_status __init acpi_initialize_objects(u32 flags)
         */
        if (acpi_gbl_group_module_level_code) {
                acpi_ns_exec_module_code_list();
-       }
-
-       /*
-        * Initialize the objects that remain uninitialized. This runs the
-        * executable AML that may be part of the declaration of these objects:
-        * operation_regions, buffer_fields, Buffers, and Packages.
-        */
-       if (!(flags & ACPI_NO_OBJECT_INIT)) {
-               ACPI_DEBUG_PRINT((ACPI_DB_EXEC,
-                                 "[Init] Completing Initialization of ACPI Objects\n"));
 
-               status = acpi_ns_initialize_objects();
-               if (ACPI_FAILURE(status)) {
-                       return_ACPI_STATUS(status);
+               /*
+                * Initialize the objects that remain uninitialized. This
+                * runs the executable AML that may be part of the
+                * declaration of these objects:
+                * operation_regions, buffer_fields, Buffers, and Packages.
+                */
+               if (!(flags & ACPI_NO_OBJECT_INIT)) {
+                       status = acpi_ns_initialize_objects();
+                       if (ACPI_FAILURE(status)) {
+                               return_ACPI_STATUS(status);
+                       }
                }
        }
 
        /*
-        * Initialize all device objects in the namespace. This runs the device
-        * _STA and _INI methods.
+        * Initialize all device/region objects in the namespace. This runs
+        * the device _STA and _INI methods and region _REG methods.
         */
-       if (!(flags & ACPI_NO_DEVICE_INIT)) {
-               ACPI_DEBUG_PRINT((ACPI_DB_EXEC,
-                                 "[Init] Initializing ACPI Devices\n"));
-
-               status = acpi_ns_initialize_devices();
+       if (!(flags & (ACPI_NO_DEVICE_INIT | ACPI_NO_ADDRESS_SPACE_INIT))) {
+               status = acpi_ns_initialize_devices(flags);
                if (ACPI_FAILURE(status)) {
                        return_ACPI_STATUS(status);
                }
index a2c8d7adb6eb5a47554708032d30b4b34bbf2a62..da370e1d31f4fe922510e9ba5adc7504b9e859f9 100644 (file)
@@ -536,7 +536,8 @@ int apei_resources_request(struct apei_resources *resources,
                goto err_unmap_ioport;
        }
 
-       return 0;
+       goto arch_res_fini;
+
 err_unmap_ioport:
        list_for_each_entry(res, &resources->ioport, list) {
                if (res == res_bak)
@@ -551,7 +552,8 @@ err_unmap_iomem:
                release_mem_region(res->start, res->end - res->start);
        }
 arch_res_fini:
-       apei_resources_fini(&arch_res);
+       if (arch_apei_filter_addr)
+               apei_resources_fini(&arch_res);
 nvs_res_fini:
        apei_resources_fini(&nvs_resources);
        return rc;
index 6e6bc1059301745fc1d5fd6ebf943d048082e821..006c3894c6ea1a9b3fd344b28b49a88b851c5fc4 100644 (file)
@@ -1207,6 +1207,9 @@ static int __init erst_init(void)
                "Failed to allocate %lld bytes for persistent store error log.\n",
                erst_erange.size);
 
+       /* Cleanup ERST Resources */
+       apei_resources_fini(&erst_resources);
+
        return 0;
 
 err_release_erange:
index 891c42d1cd652c732f4957fc8bb2c2f20a5fc470..0e8567846f1afbce5185361be758cb743c0b6836 100644 (file)
@@ -479,24 +479,38 @@ static void acpi_device_remove_notify_handler(struct acpi_device *device)
                              Device Matching
    -------------------------------------------------------------------------- */
 
-static struct acpi_device *acpi_primary_dev_companion(struct acpi_device *adev,
-                                                     const struct device *dev)
+/**
+ * acpi_get_first_physical_node - Get first physical node of an ACPI device
+ * @adev:      ACPI device in question
+ *
+ * Return: First physical node of ACPI device @adev
+ */
+struct device *acpi_get_first_physical_node(struct acpi_device *adev)
 {
        struct mutex *physical_node_lock = &adev->physical_node_lock;
+       struct device *phys_dev;
 
        mutex_lock(physical_node_lock);
        if (list_empty(&adev->physical_node_list)) {
-               adev = NULL;
+               phys_dev = NULL;
        } else {
                const struct acpi_device_physical_node *node;
 
                node = list_first_entry(&adev->physical_node_list,
                                        struct acpi_device_physical_node, node);
-               if (node->dev != dev)
-                       adev = NULL;
+
+               phys_dev = node->dev;
        }
        mutex_unlock(physical_node_lock);
-       return adev;
+       return phys_dev;
+}
+
+static struct acpi_device *acpi_primary_dev_companion(struct acpi_device *adev,
+                                                     const struct device *dev)
+{
+       const struct device *phys_dev = acpi_get_first_physical_node(adev);
+
+       return phys_dev && phys_dev == dev ? adev : NULL;
 }
 
 /**
index 6730f965b3793f25ba73125a00b18f346decf75f..8adac69dba3da52e5bd4ebb7f0780543fc17c51f 100644 (file)
@@ -39,6 +39,7 @@
 
 #include <linux/cpufreq.h>
 #include <linux/delay.h>
+#include <linux/ktime.h>
 
 #include <acpi/cppc_acpi.h>
 /*
@@ -63,58 +64,140 @@ static struct mbox_chan *pcc_channel;
 static void __iomem *pcc_comm_addr;
 static u64 comm_base_addr;
 static int pcc_subspace_idx = -1;
-static u16 pcc_cmd_delay;
 static bool pcc_channel_acquired;
+static ktime_t deadline;
+static unsigned int pcc_mpar, pcc_mrtt;
+
+/* pcc mapped address + header size + offset within PCC subspace */
+#define GET_PCC_VADDR(offs) (pcc_comm_addr + 0x8 + (offs))
 
 /*
  * Arbitrary Retries in case the remote processor is slow to respond
- * to PCC commands.
+ * to PCC commands. Keeping it high enough to cover emulators where
+ * the processors run painfully slow.
  */
 #define NUM_RETRIES 500
 
+static int check_pcc_chan(void)
+{
+       int ret = -EIO;
+       struct acpi_pcct_shared_memory __iomem *generic_comm_base = pcc_comm_addr;
+       ktime_t next_deadline = ktime_add(ktime_get(), deadline);
+
+       /* Retry in case the remote processor was too slow to catch up. */
+       while (!ktime_after(ktime_get(), next_deadline)) {
+               /*
+                * Per spec, prior to boot the PCC space will be initialized by
+                * platform and should have set the command completion bit when
+                * PCC can be used by OSPM
+                */
+               if (readw_relaxed(&generic_comm_base->status) & PCC_CMD_COMPLETE) {
+                       ret = 0;
+                       break;
+               }
+               /*
+                * Reducing the bus traffic in case this loop takes longer than
+                * a few retries.
+                */
+               udelay(3);
+       }
+
+       return ret;
+}
+
 static int send_pcc_cmd(u16 cmd)
 {
-       int retries, result = -EIO;
-       struct acpi_pcct_hw_reduced *pcct_ss = pcc_channel->con_priv;
+       int ret = -EIO;
        struct acpi_pcct_shared_memory *generic_comm_base =
                (struct acpi_pcct_shared_memory *) pcc_comm_addr;
-       u32 cmd_latency = pcct_ss->latency;
+       static ktime_t last_cmd_cmpl_time, last_mpar_reset;
+       static int mpar_count;
+       unsigned int time_delta;
 
-       /* Min time OS should wait before sending next command. */
-       udelay(pcc_cmd_delay);
+       /*
+        * For CMD_WRITE we know for a fact the caller should have checked
+        * the channel before writing to PCC space
+        */
+       if (cmd == CMD_READ) {
+               ret = check_pcc_chan();
+               if (ret)
+                       return ret;
+       }
+
+       /*
+        * Handle the Minimum Request Turnaround Time (MRTT)
+        * "The minimum amount of time that OSPM must wait after the completion
+        * of a command before issuing the next command, in microseconds"
+        */
+       if (pcc_mrtt) {
+               time_delta = ktime_us_delta(ktime_get(), last_cmd_cmpl_time);
+               if (pcc_mrtt > time_delta)
+                       udelay(pcc_mrtt - time_delta);
+       }
+
+       /*
+        * Handle the non-zero Maximum Periodic Access Rate (MPAR)
+        * "The maximum number of periodic requests that the subspace channel can
+        * support, reported in commands per minute. 0 indicates no limitation."
+        *
+        * This parameter should ideally be zero or large enough that it can
+        * handle the maximum number of requests that all the cores in the
+        * system can collectively generate. If it is not, we will follow the
+        * spec and just not send the request to the platform after hitting
+        * the MPAR limit in any 60s window.
+        */
+       if (pcc_mpar) {
+               if (mpar_count == 0) {
+                       time_delta = ktime_ms_delta(ktime_get(), last_mpar_reset);
+                       if (time_delta < 60 * MSEC_PER_SEC) {
+                               pr_debug("PCC cmd not sent due to MPAR limit");
+                               return -EIO;
+                       }
+                       last_mpar_reset = ktime_get();
+                       mpar_count = pcc_mpar;
+               }
+               mpar_count--;
+       }
 
        /* Write to the shared comm region. */
-       writew(cmd, &generic_comm_base->command);
+       writew_relaxed(cmd, &generic_comm_base->command);
 
        /* Flip CMD COMPLETE bit */
-       writew(0, &generic_comm_base->status);
+       writew_relaxed(0, &generic_comm_base->status);
 
        /* Ring doorbell */
-       result = mbox_send_message(pcc_channel, &cmd);
-       if (result < 0) {
+       ret = mbox_send_message(pcc_channel, &cmd);
+       if (ret < 0) {
                pr_err("Err sending PCC mbox message. cmd:%d, ret:%d\n",
-                               cmd, result);
-               return result;
+                               cmd, ret);
+               return ret;
        }
 
-       /* Wait for a nominal time to let platform process command. */
-       udelay(cmd_latency);
-
-       /* Retry in case the remote processor was too slow to catch up. */
-       for (retries = NUM_RETRIES; retries > 0; retries--) {
-               if (readw_relaxed(&generic_comm_base->status) & PCC_CMD_COMPLETE) {
-                       result = 0;
-                       break;
-               }
+       /*
+        * For READs we need to ensure the cmd completed so that the
+        * ensuing read()s can proceed. For WRITEs we don't care
+        * because the actual write()s are done before coming here
+        * and the next READ or WRITE will check if the channel
+        * is busy/free at the entry of this call.
+        *
+        * If Minimum Request Turnaround Time is non-zero, we need
+        * to record the completion time of both READ and WRITE
+        * commands for proper handling of MRTT, so we need to check
+        * for pcc_mrtt in addition to CMD_READ.
+        */
+       if (cmd == CMD_READ || pcc_mrtt) {
+               ret = check_pcc_chan();
+               if (pcc_mrtt)
+                       last_cmd_cmpl_time = ktime_get();
        }
 
-       mbox_client_txdone(pcc_channel, result);
-       return result;
+       mbox_client_txdone(pcc_channel, ret);
+       return ret;
 }
 
 static void cppc_chan_tx_done(struct mbox_client *cl, void *msg, int ret)
 {
-       if (ret)
+       if (ret < 0)
                pr_debug("TX did not complete: CMD sent:%x, ret:%d\n",
                                *(u16 *)msg, ret);
        else
@@ -306,6 +389,7 @@ static int register_pcc_channel(int pcc_subspace_idx)
 {
        struct acpi_pcct_hw_reduced *cppc_ss;
        unsigned int len;
+       u64 usecs_lat;
 
        if (pcc_subspace_idx >= 0) {
                pcc_channel = pcc_mbox_request_channel(&cppc_mbox_cl,
@@ -335,7 +419,16 @@ static int register_pcc_channel(int pcc_subspace_idx)
                 */
                comm_base_addr = cppc_ss->base_address;
                len = cppc_ss->length;
-               pcc_cmd_delay = cppc_ss->min_turnaround_time;
+
+               /*
+                * cppc_ss->latency is just a Nominal value. In reality
+                * the remote processor could be much slower to reply.
+                * So add an arbitrary amount of wait on top of Nominal.
+                */
+               usecs_lat = NUM_RETRIES * cppc_ss->latency;
+               deadline = ns_to_ktime(usecs_lat * NSEC_PER_USEC);
+               pcc_mrtt = cppc_ss->min_turnaround_time;
+               pcc_mpar = cppc_ss->max_access_rate;
 
                pcc_comm_addr = acpi_os_ioremap(comm_base_addr, len);
                if (!pcc_comm_addr) {
@@ -546,29 +639,74 @@ void acpi_cppc_processor_exit(struct acpi_processor *pr)
 }
 EXPORT_SYMBOL_GPL(acpi_cppc_processor_exit);
 
-static u64 get_phys_addr(struct cpc_reg *reg)
-{
-       /* PCC communication addr space begins at byte offset 0x8. */
-       if (reg->space_id == ACPI_ADR_SPACE_PLATFORM_COMM)
-               return (u64)comm_base_addr + 0x8 + reg->address;
-       else
-               return reg->address;
-}
+/*
+ * Since cpc_read and cpc_write are called while holding pcc_lock, they should be
+ * as fast as possible. We have already mapped the PCC subspace during init, so
+ * we can directly write to it.
+ */
 
-static void cpc_read(struct cpc_reg *reg, u64 *val)
+static int cpc_read(struct cpc_reg *reg, u64 *val)
 {
-       u64 addr = get_phys_addr(reg);
+       int ret_val = 0;
 
-       acpi_os_read_memory((acpi_physical_address)addr,
-                       val, reg->bit_width);
+       *val = 0;
+       if (reg->space_id == ACPI_ADR_SPACE_PLATFORM_COMM) {
+               void __iomem *vaddr = GET_PCC_VADDR(reg->address);
+
+               switch (reg->bit_width) {
+               case 8:
+                       *val = readb_relaxed(vaddr);
+                       break;
+               case 16:
+                       *val = readw_relaxed(vaddr);
+                       break;
+               case 32:
+                       *val = readl_relaxed(vaddr);
+                       break;
+               case 64:
+                       *val = readq_relaxed(vaddr);
+                       break;
+               default:
+                       pr_debug("Error: Cannot read %u bit width from PCC\n",
+                               reg->bit_width);
+                       ret_val = -EFAULT;
+               }
+       } else
+               ret_val = acpi_os_read_memory((acpi_physical_address)reg->address,
+                                       val, reg->bit_width);
+       return ret_val;
 }
 
-static void cpc_write(struct cpc_reg *reg, u64 val)
+static int cpc_write(struct cpc_reg *reg, u64 val)
 {
-       u64 addr = get_phys_addr(reg);
+       int ret_val = 0;
 
-       acpi_os_write_memory((acpi_physical_address)addr,
-                       val, reg->bit_width);
+       if (reg->space_id == ACPI_ADR_SPACE_PLATFORM_COMM) {
+               void __iomem *vaddr = GET_PCC_VADDR(reg->address);
+
+               switch (reg->bit_width) {
+               case 8:
+                       writeb_relaxed(val, vaddr);
+                       break;
+               case 16:
+                       writew_relaxed(val, vaddr);
+                       break;
+               case 32:
+                       writel_relaxed(val, vaddr);
+                       break;
+               case 64:
+                       writeq_relaxed(val, vaddr);
+                       break;
+               default:
+                       pr_debug("Error: Cannot write %u bit width to PCC\n",
+                               reg->bit_width);
+                       ret_val = -EFAULT;
+                       break;
+               }
+       } else
+               ret_val = acpi_os_write_memory((acpi_physical_address)reg->address,
+                               val, reg->bit_width);
+       return ret_val;
 }
 
 /**
@@ -604,7 +742,7 @@ int cppc_get_perf_caps(int cpunum, struct cppc_perf_caps *perf_caps)
                        (ref_perf->cpc_entry.reg.space_id == ACPI_ADR_SPACE_PLATFORM_COMM) ||
                        (nom_perf->cpc_entry.reg.space_id == ACPI_ADR_SPACE_PLATFORM_COMM)) {
                /* Ring doorbell once to update PCC subspace */
-               if (send_pcc_cmd(CMD_READ)) {
+               if (send_pcc_cmd(CMD_READ) < 0) {
                        ret = -EIO;
                        goto out_err;
                }
@@ -662,7 +800,7 @@ int cppc_get_perf_ctrs(int cpunum, struct cppc_perf_fb_ctrs *perf_fb_ctrs)
        if ((delivered_reg->cpc_entry.reg.space_id == ACPI_ADR_SPACE_PLATFORM_COMM) ||
                        (reference_reg->cpc_entry.reg.space_id == ACPI_ADR_SPACE_PLATFORM_COMM)) {
                /* Ring doorbell once to update PCC subspace */
-               if (send_pcc_cmd(CMD_READ)) {
+               if (send_pcc_cmd(CMD_READ) < 0) {
                        ret = -EIO;
                        goto out_err;
                }
@@ -713,6 +851,13 @@ int cppc_set_perf(int cpu, struct cppc_perf_ctrls *perf_ctrls)
 
        spin_lock(&pcc_lock);
 
+       /* If this is PCC reg, check if channel is free before writing */
+       if (desired_reg->cpc_entry.reg.space_id == ACPI_ADR_SPACE_PLATFORM_COMM) {
+               ret = check_pcc_chan();
+               if (ret)
+                       goto busy_channel;
+       }
+
        /*
         * Skip writing MIN/MAX until Linux knows how to come up with
         * useful values.
@@ -722,10 +867,10 @@ int cppc_set_perf(int cpu, struct cppc_perf_ctrls *perf_ctrls)
        /* Is this a PCC reg ?*/
        if (desired_reg->cpc_entry.reg.space_id == ACPI_ADR_SPACE_PLATFORM_COMM) {
                /* Ring doorbell so Remote can get our perf request. */
-               if (send_pcc_cmd(CMD_WRITE))
+               if (send_pcc_cmd(CMD_WRITE) < 0)
                        ret = -EIO;
        }
-
+busy_channel:
        spin_unlock(&pcc_lock);
 
        return ret;
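The new send path enforces three timing rules: a deadline-bounded poll for command completion, the MRTT gap after a completed command, and the MPAR commands-per-60-seconds budget. The standalone model below sketches only the MRTT/MPAR decision logic against a fake clock; it deliberately simplifies the driver's window bookkeeping.

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

static uint64_t now_us = 1000;          /* fake monotonic clock, microseconds */

struct pcc_limits {
        uint64_t mrtt_us;               /* min gap after a completed command   */
        uint32_t mpar;                  /* max commands per 60 s, 0 = no limit */
};

struct pcc_state {
        uint64_t last_cmpl_us;          /* completion time of the last command */
        uint64_t mpar_window_start_us;  /* start of the current 60 s window    */
        uint32_t mpar_left;             /* remaining budget in this window     */
};

/* Decide whether a command may be sent now; consumes the MPAR budget if so. */
static bool pcc_may_send(const struct pcc_limits *lim, struct pcc_state *st)
{
        if (lim->mrtt_us && now_us - st->last_cmpl_us < lim->mrtt_us)
                return false;                   /* would violate MRTT */

        if (lim->mpar) {
                if (st->mpar_left == 0) {
                        if (now_us - st->mpar_window_start_us < 60ull * 1000000)
                                return false;   /* 60 s budget exhausted */
                        st->mpar_window_start_us = now_us;
                        st->mpar_left = lim->mpar;
                }
                st->mpar_left--;
        }
        return true;
}

int main(void)
{
        struct pcc_limits lim = { .mrtt_us = 100, .mpar = 2 };
        struct pcc_state st = { .mpar_left = lim.mpar };

        for (int i = 0; i < 4; i++) {
                bool ok = pcc_may_send(&lim, &st);

                printf("t=%llu us: %s\n", (unsigned long long)now_us,
                       ok ? "send" : "throttled");
                if (ok)
                        st.last_cmpl_us = now_us;       /* command completed */
                now_us += 150;                          /* next attempt later */
        }
        return 0;
}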
index 1e6833a5cd44b6db55b5c9e376eceabfb44b30e0..a37508ef66c1b544dbf2a677c7a4c7608022cd40 100644 (file)
@@ -20,6 +20,7 @@
 
 #define PREFIX "ACPI: "
 
+void acpi_initrd_initialize_tables(void);
 acpi_status acpi_os_initialize1(void);
 void init_acpi_device_notify(void);
 int acpi_scan_init(void);
@@ -29,6 +30,11 @@ void acpi_processor_init(void);
 void acpi_platform_init(void);
 void acpi_pnp_init(void);
 void acpi_int340x_thermal_init(void);
+#ifdef CONFIG_ARM_AMBA
+void acpi_amba_init(void);
+#else
+static inline void acpi_amba_init(void) {}
+#endif
 int acpi_sysfs_init(void);
 void acpi_container_init(void);
 void acpi_memory_hotplug_init(void);
@@ -106,6 +112,7 @@ bool acpi_device_is_present(struct acpi_device *adev);
 bool acpi_device_is_battery(struct acpi_device *adev);
 bool acpi_device_is_first_physical_node(struct acpi_device *adev,
                                        const struct device *dev);
+struct device *acpi_get_first_physical_node(struct acpi_device *adev);
 
 /* --------------------------------------------------------------------------
                      Device Matching and Notification
index 67da6fb722740bf9f540c2a7d603460ceb0e2e8f..814d5f83b75e1e73385b59f753c733326957696f 100644 (file)
@@ -602,6 +602,14 @@ acpi_os_predefined_override(const struct acpi_predefined_names *init_val,
        return AE_OK;
 }
 
+static void acpi_table_taint(struct acpi_table_header *table)
+{
+       pr_warn(PREFIX
+               "Override [%4.4s-%8.8s], this is unsafe: tainting kernel\n",
+               table->signature, table->oem_table_id);
+       add_taint(TAINT_OVERRIDDEN_ACPI_TABLE, LOCKDEP_NOW_UNRELIABLE);
+}
+
 #ifdef CONFIG_ACPI_INITRD_TABLE_OVERRIDE
 #include <linux/earlycpio.h>
 #include <linux/memblock.h>
@@ -636,6 +644,7 @@ static const char * const table_sigs[] = {
 
 #define ACPI_OVERRIDE_TABLES 64
 static struct cpio_data __initdata acpi_initrd_files[ACPI_OVERRIDE_TABLES];
+static DECLARE_BITMAP(acpi_initrd_installed, ACPI_OVERRIDE_TABLES);
 
 #define MAP_CHUNK_SIZE   (NR_FIX_BTMAPS << PAGE_SHIFT)
 
@@ -746,96 +755,125 @@ void __init acpi_initrd_override(void *data, size_t size)
                }
        }
 }
-#endif /* CONFIG_ACPI_INITRD_TABLE_OVERRIDE */
 
-static void acpi_table_taint(struct acpi_table_header *table)
+acpi_status
+acpi_os_physical_table_override(struct acpi_table_header *existing_table,
+                               acpi_physical_address *address, u32 *length)
 {
-       pr_warn(PREFIX
-               "Override [%4.4s-%8.8s], this is unsafe: tainting kernel\n",
-               table->signature, table->oem_table_id);
-       add_taint(TAINT_OVERRIDDEN_ACPI_TABLE, LOCKDEP_NOW_UNRELIABLE);
-}
+       int table_offset = 0;
+       int table_index = 0;
+       struct acpi_table_header *table;
+       u32 table_length;
 
+       *length = 0;
+       *address = 0;
+       if (!acpi_tables_addr)
+               return AE_OK;
 
-acpi_status
-acpi_os_table_override(struct acpi_table_header * existing_table,
-                      struct acpi_table_header ** new_table)
-{
-       if (!existing_table || !new_table)
-               return AE_BAD_PARAMETER;
+       while (table_offset + ACPI_HEADER_SIZE <= all_tables_size) {
+               table = acpi_os_map_memory(acpi_tables_addr + table_offset,
+                                          ACPI_HEADER_SIZE);
+               if (table_offset + table->length > all_tables_size) {
+                       acpi_os_unmap_memory(table, ACPI_HEADER_SIZE);
+                       WARN_ON(1);
+                       return AE_OK;
+               }
 
-       *new_table = NULL;
+               table_length = table->length;
 
-#ifdef CONFIG_ACPI_CUSTOM_DSDT
-       if (strncmp(existing_table->signature, "DSDT", 4) == 0)
-               *new_table = (struct acpi_table_header *)AmlCode;
-#endif
-       if (*new_table != NULL)
+               /* Only override a matching table that has not been used already */
+               if (test_bit(table_index, acpi_initrd_installed) ||
+                   memcmp(existing_table->signature, table->signature, 4) ||
+                   memcmp(table->oem_table_id, existing_table->oem_table_id,
+                          ACPI_OEM_TABLE_ID_SIZE)) {
+                       acpi_os_unmap_memory(table, ACPI_HEADER_SIZE);
+                       goto next_table;
+               }
+
+               *length = table_length;
+               *address = acpi_tables_addr + table_offset;
                acpi_table_taint(existing_table);
+               acpi_os_unmap_memory(table, ACPI_HEADER_SIZE);
+               set_bit(table_index, acpi_initrd_installed);
+               break;
+
+next_table:
+               table_offset += table_length;
+               table_index++;
+       }
        return AE_OK;
 }
 
-acpi_status
-acpi_os_physical_table_override(struct acpi_table_header *existing_table,
-                               acpi_physical_address *address,
-                               u32 *table_length)
+void __init acpi_initrd_initialize_tables(void)
 {
-#ifndef CONFIG_ACPI_INITRD_TABLE_OVERRIDE
-       *table_length = 0;
-       *address = 0;
-       return AE_OK;
-#else
        int table_offset = 0;
+       int table_index = 0;
+       u32 table_length;
        struct acpi_table_header *table;
 
-       *table_length = 0;
-       *address = 0;
-
        if (!acpi_tables_addr)
-               return AE_OK;
-
-       do {
-               if (table_offset + ACPI_HEADER_SIZE > all_tables_size) {
-                       WARN_ON(1);
-                       return AE_OK;
-               }
+               return;
 
+       while (table_offset + ACPI_HEADER_SIZE <= all_tables_size) {
                table = acpi_os_map_memory(acpi_tables_addr + table_offset,
                                           ACPI_HEADER_SIZE);
-
                if (table_offset + table->length > all_tables_size) {
                        acpi_os_unmap_memory(table, ACPI_HEADER_SIZE);
                        WARN_ON(1);
-                       return AE_OK;
+                       return;
                }
 
-               table_offset += table->length;
+               table_length = table->length;
 
-               if (memcmp(existing_table->signature, table->signature, 4)) {
-                       acpi_os_unmap_memory(table,
-                                    ACPI_HEADER_SIZE);
-                       continue;
-               }
-
-               /* Only override tables with matching oem id */
-               if (memcmp(table->oem_table_id, existing_table->oem_table_id,
-                          ACPI_OEM_TABLE_ID_SIZE)) {
-                       acpi_os_unmap_memory(table,
-                                    ACPI_HEADER_SIZE);
-                       continue;
+               /* Skip RSDT/XSDT which should only be used for override */
+               if (test_bit(table_index, acpi_initrd_installed) ||
+                   ACPI_COMPARE_NAME(table->signature, ACPI_SIG_RSDT) ||
+                   ACPI_COMPARE_NAME(table->signature, ACPI_SIG_XSDT)) {
+                       acpi_os_unmap_memory(table, ACPI_HEADER_SIZE);
+                       goto next_table;
                }
 
-               table_offset -= table->length;
-               *table_length = table->length;
+               acpi_table_taint(table);
                acpi_os_unmap_memory(table, ACPI_HEADER_SIZE);
-               *address = acpi_tables_addr + table_offset;
-               break;
-       } while (table_offset + ACPI_HEADER_SIZE < all_tables_size);
+               acpi_install_table(acpi_tables_addr + table_offset, TRUE);
+               set_bit(table_index, acpi_initrd_installed);
+next_table:
+               table_offset += table_length;
+               table_index++;
+       }
+}
+#else
+acpi_status
+acpi_os_physical_table_override(struct acpi_table_header *existing_table,
+                               acpi_physical_address *address,
+                               u32 *table_length)
+{
+       *table_length = 0;
+       *address = 0;
+       return AE_OK;
+}
+
+void __init acpi_initrd_initialize_tables(void)
+{
+}
+#endif /* CONFIG_ACPI_INITRD_TABLE_OVERRIDE */
 
-       if (*address != 0)
+acpi_status
+acpi_os_table_override(struct acpi_table_header *existing_table,
+                      struct acpi_table_header **new_table)
+{
+       if (!existing_table || !new_table)
+               return AE_BAD_PARAMETER;
+
+       *new_table = NULL;
+
+#ifdef CONFIG_ACPI_CUSTOM_DSDT
+       if (strncmp(existing_table->signature, "DSDT", 4) == 0)
+               *new_table = (struct acpi_table_header *)AmlCode;
+#endif
+       if (*new_table != NULL)
                acpi_table_taint(existing_table);
        return AE_OK;
-#endif
 }
 
 static irqreturn_t acpi_irq(int irq, void *dev_id)
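Both new loops in osl.c walk the packed blob of upgraded tables (header first, then header->length bytes) and mark each consumed entry in a bitmap so it is handed out at most once. A standalone sketch of that walk, with a trivial header type standing in for struct acpi_table_header:

#include <stdint.h>
#include <stdio.h>
#include <string.h>

struct table_header {
        char     signature[4];
        uint32_t length;        /* total length of this table, header included */
};

#define MAX_TABLES 64
static uint64_t used_bitmap;    /* up to 64 tables, one bit each */

/* Find the first unused table with the given signature; mark it used. */
static const struct table_header *take_table(const uint8_t *blob, size_t size,
                                             const char sig[4])
{
        size_t off = 0;
        int idx = 0;

        while (off + sizeof(struct table_header) <= size && idx < MAX_TABLES) {
                const struct table_header *t = (const void *)(blob + off);

                if (off + t->length > size)
                        return NULL;    /* truncated blob, stop walking */

                if (!(used_bitmap & (1ull << idx)) &&
                    !memcmp(t->signature, sig, 4)) {
                        used_bitmap |= 1ull << idx;
                        return t;
                }
                off += t->length;
                idx++;
        }
        return NULL;
}

int main(void)
{
        struct table_header tables[2] = {
                { "SSDT", sizeof(struct table_header) },
                { "SSDT", sizeof(struct table_header) },
        };
        const uint8_t *blob = (const uint8_t *)tables;

        for (int i = 0; i < 3; i++)
                printf("attempt %d: %s\n", i,
                       take_table(blob, sizeof(tables), "SSDT") ?
                       "got one" : "none left");
        return 0;
}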
index c8e169e46673ae253882872cda7a94249540c3a5..2c45dd3acc17a4f6d9248e9db7ef4b31e8dbfdb0 100644 (file)
@@ -33,6 +33,7 @@
 #include <linux/pci.h>
 #include <linux/acpi.h>
 #include <linux/slab.h>
+#include <linux/interrupt.h>
 
 #define PREFIX "ACPI: "
 
@@ -387,6 +388,23 @@ static inline int acpi_isa_register_gsi(struct pci_dev *dev)
 }
 #endif
 
+static inline bool acpi_pci_irq_valid(struct pci_dev *dev, u8 pin)
+{
+#ifdef CONFIG_X86
+       /*
+        * On x86 irq line 0xff means "unknown" or "no connection"
+        * (PCI 3.0, Section 6.2.4, footnote on page 223).
+        */
+       if (dev->irq == 0xff) {
+               dev->irq = IRQ_NOTCONNECTED;
+               dev_warn(&dev->dev, "PCI INT %c: not connected\n",
+                        pin_name(pin));
+               return false;
+       }
+#endif
+       return true;
+}
+
 int acpi_pci_irq_enable(struct pci_dev *dev)
 {
        struct acpi_prt_entry *entry;
@@ -431,11 +449,14 @@ int acpi_pci_irq_enable(struct pci_dev *dev)
        } else
                gsi = -1;
 
-       /*
-        * No IRQ known to the ACPI subsystem - maybe the BIOS / 
-        * driver reported one, then use it. Exit in any case.
-        */
        if (gsi < 0) {
+               /*
+                * No IRQ known to the ACPI subsystem - maybe the BIOS /
+                * driver reported one, then use it. Exit in any case.
+                */
+               if (!acpi_pci_irq_valid(dev, pin))
+                       return 0;
+
                if (acpi_isa_register_gsi(dev))
                        dev_warn(&dev->dev, "PCI INT %c: no GSI\n",
                                 pin_name(pin));
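Editorial note: acpi_pci_irq_valid() above encodes the rule that a raw Interrupt Line value of 0xff on x86 means the pin is not routed anywhere, so the device gets IRQ_NOTCONNECTED instead of a bogus IRQ. A minimal standalone sketch of that classification, using an invented sentinel rather than the kernel's IRQ_NOTCONNECTED:

    #include <stdio.h>

    #define PCI_IRQ_LINE_NOT_CONNECTED 0xff  /* per PCI 3.0, sec. 6.2.4 */
    #define IRQ_NOT_CONNECTED (-1)           /* illustrative sentinel */

    /* Return a usable IRQ number, or a sentinel when the Interrupt Line
     * register says the pin is not routed to any controller input. */
    static int decode_interrupt_line(unsigned int line)
    {
        if (line == PCI_IRQ_LINE_NOT_CONNECTED)
            return IRQ_NOT_CONNECTED;
        return (int)line;
    }

    int main(void)
    {
        printf("0x0b -> %d\n", decode_interrupt_line(0x0b));
        printf("0xff -> %d\n", decode_interrupt_line(0xff));
        return 0;
    }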
index 11154a330f075dc04c0fec99299d6f5000e901be..d2fa8cb82d2bd34aa4cf289cf4888ae65931757d 100644 (file)
@@ -314,7 +314,6 @@ static int __init acpi_processor_driver_init(void)
        if (result < 0)
                return result;
 
-       acpi_processor_syscore_init();
        register_hotcpu_notifier(&acpi_cpu_notifier);
        acpi_thermal_cpufreq_init();
        acpi_processor_ppc_init();
@@ -330,7 +329,6 @@ static void __exit acpi_processor_driver_exit(void)
        acpi_processor_ppc_exit();
        acpi_thermal_cpufreq_exit();
        unregister_hotcpu_notifier(&acpi_cpu_notifier);
-       acpi_processor_syscore_exit();
        driver_unregister(&acpi_processor_driver);
 }
 
index 175c86bee3a95b47934d094a491b842973e2e090..fadce354d2b7e6207ae77bd235cc71d56264fd21 100644 (file)
@@ -23,6 +23,7 @@
  *
  * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
  */
+#define pr_fmt(fmt) "ACPI: " fmt
 
 #include <linux/module.h>
 #include <linux/acpi.h>
@@ -30,7 +31,6 @@
 #include <linux/sched.h>       /* need_resched() */
 #include <linux/tick.h>
 #include <linux/cpuidle.h>
-#include <linux/syscore_ops.h>
 #include <acpi/processor.h>
 
 /*
@@ -43,8 +43,6 @@
 #include <asm/apic.h>
 #endif
 
-#define PREFIX "ACPI: "
-
 #define ACPI_PROCESSOR_CLASS            "processor"
 #define _COMPONENT              ACPI_PROCESSOR_COMPONENT
 ACPI_MODULE_NAME("processor_idle");
@@ -81,9 +79,9 @@ static int set_max_cstate(const struct dmi_system_id *id)
        if (max_cstate > ACPI_PROCESSOR_MAX_POWER)
                return 0;
 
-       printk(KERN_NOTICE PREFIX "%s detected - limiting to C%ld max_cstate."
-              " Override with \"processor.max_cstate=%d\"\n", id->ident,
-              (long)id->driver_data, ACPI_PROCESSOR_MAX_POWER + 1);
+       pr_notice("%s detected - limiting to C%ld max_cstate."
+                 " Override with \"processor.max_cstate=%d\"\n", id->ident,
+                 (long)id->driver_data, ACPI_PROCESSOR_MAX_POWER + 1);
 
        max_cstate = (long)id->driver_data;
 
@@ -194,42 +192,6 @@ static void lapic_timer_state_broadcast(struct acpi_processor *pr,
 
 #endif
 
-#ifdef CONFIG_PM_SLEEP
-static u32 saved_bm_rld;
-
-static int acpi_processor_suspend(void)
-{
-       acpi_read_bit_register(ACPI_BITREG_BUS_MASTER_RLD, &saved_bm_rld);
-       return 0;
-}
-
-static void acpi_processor_resume(void)
-{
-       u32 resumed_bm_rld = 0;
-
-       acpi_read_bit_register(ACPI_BITREG_BUS_MASTER_RLD, &resumed_bm_rld);
-       if (resumed_bm_rld == saved_bm_rld)
-               return;
-
-       acpi_write_bit_register(ACPI_BITREG_BUS_MASTER_RLD, saved_bm_rld);
-}
-
-static struct syscore_ops acpi_processor_syscore_ops = {
-       .suspend = acpi_processor_suspend,
-       .resume = acpi_processor_resume,
-};
-
-void acpi_processor_syscore_init(void)
-{
-       register_syscore_ops(&acpi_processor_syscore_ops);
-}
-
-void acpi_processor_syscore_exit(void)
-{
-       unregister_syscore_ops(&acpi_processor_syscore_ops);
-}
-#endif /* CONFIG_PM_SLEEP */
-
 #if defined(CONFIG_X86)
 static void tsc_check_state(int state)
 {
@@ -351,7 +313,7 @@ static int acpi_processor_get_power_info_cst(struct acpi_processor *pr)
 
        /* There must be at least 2 elements */
        if (!cst || (cst->type != ACPI_TYPE_PACKAGE) || cst->package.count < 2) {
-               printk(KERN_ERR PREFIX "not enough elements in _CST\n");
+               pr_err("not enough elements in _CST\n");
                ret = -EFAULT;
                goto end;
        }
@@ -360,7 +322,7 @@ static int acpi_processor_get_power_info_cst(struct acpi_processor *pr)
 
        /* Validate number of power states. */
        if (count < 1 || count != cst->package.count - 1) {
-               printk(KERN_ERR PREFIX "count given by _CST is not valid\n");
+               pr_err("count given by _CST is not valid\n");
                ret = -EFAULT;
                goto end;
        }
@@ -469,11 +431,9 @@ static int acpi_processor_get_power_info_cst(struct acpi_processor *pr)
                 * (From 1 through ACPI_PROCESSOR_MAX_POWER - 1)
                 */
                if (current_count >= (ACPI_PROCESSOR_MAX_POWER - 1)) {
-                       printk(KERN_WARNING
-                              "Limiting number of power states to max (%d)\n",
-                              ACPI_PROCESSOR_MAX_POWER);
-                       printk(KERN_WARNING
-                              "Please increase ACPI_PROCESSOR_MAX_POWER if needed.\n");
+                       pr_warn("Limiting number of power states to max (%d)\n",
+                               ACPI_PROCESSOR_MAX_POWER);
+                       pr_warn("Please increase ACPI_PROCESSOR_MAX_POWER if needed.\n");
                        break;
                }
        }
@@ -1097,8 +1057,8 @@ int acpi_processor_power_init(struct acpi_processor *pr)
                        retval = cpuidle_register_driver(&acpi_idle_driver);
                        if (retval)
                                return retval;
-                       printk(KERN_DEBUG "ACPI: %s registered with cpuidle\n",
-                                       acpi_idle_driver.name);
+                       pr_debug("%s registered with cpuidle\n",
+                                acpi_idle_driver.name);
                }
 
                dev = kzalloc(sizeof(*dev), GFP_KERNEL);
index 407a3760e8de659863fb4f6cea274a78bf11ff7b..5f28cf7783490e1da56d12378d7c3fba0e52185e 100644 (file)
@@ -1930,6 +1930,7 @@ int __init acpi_scan_init(void)
        acpi_memory_hotplug_init();
        acpi_pnp_init();
        acpi_int340x_thermal_init();
+       acpi_amba_init();
 
        acpi_scan_add_handler(&generic_device_handler);
 
index 9cb975200cacba638ce84b19fbee8d9948459300..fbfcce3b5227e0c4ea90e5838f8762582978ae99 100644 (file)
@@ -19,6 +19,7 @@
 #include <linux/reboot.h>
 #include <linux/acpi.h>
 #include <linux/module.h>
+#include <linux/syscore_ops.h>
 #include <asm/io.h>
 #include <trace/events/power.h>
 
@@ -677,6 +678,39 @@ static void acpi_sleep_suspend_setup(void)
 static inline void acpi_sleep_suspend_setup(void) {}
 #endif /* !CONFIG_SUSPEND */
 
+#ifdef CONFIG_PM_SLEEP
+static u32 saved_bm_rld;
+
+static int  acpi_save_bm_rld(void)
+{
+       acpi_read_bit_register(ACPI_BITREG_BUS_MASTER_RLD, &saved_bm_rld);
+       return 0;
+}
+
+static void  acpi_restore_bm_rld(void)
+{
+       u32 resumed_bm_rld = 0;
+
+       acpi_read_bit_register(ACPI_BITREG_BUS_MASTER_RLD, &resumed_bm_rld);
+       if (resumed_bm_rld == saved_bm_rld)
+               return;
+
+       acpi_write_bit_register(ACPI_BITREG_BUS_MASTER_RLD, saved_bm_rld);
+}
+
+static struct syscore_ops acpi_sleep_syscore_ops = {
+       .suspend = acpi_save_bm_rld,
+       .resume = acpi_restore_bm_rld,
+};
+
+void acpi_sleep_syscore_init(void)
+{
+       register_syscore_ops(&acpi_sleep_syscore_ops);
+}
+#else
+static inline void acpi_sleep_syscore_init(void) {}
+#endif /* CONFIG_PM_SLEEP */
+
 #ifdef CONFIG_HIBERNATION
 static unsigned long s4_hardware_signature;
 static struct acpi_table_facs *facs;
@@ -839,6 +873,7 @@ int __init acpi_sleep_init(void)
 
        sleep_states[ACPI_STATE_S0] = 1;
 
+       acpi_sleep_syscore_init();
        acpi_sleep_suspend_setup();
        acpi_sleep_hibernate_setup();
 
index 6c0f0794aa82bd007a14effaa21494043e3eb8e1..57c0a4525dba26f908f108b17bebf2627bbb4239 100644 (file)
@@ -32,6 +32,7 @@
 #include <linux/errno.h>
 #include <linux/acpi.h>
 #include <linux/bootmem.h>
+#include "internal.h"
 
 #define ACPI_MAX_TABLES                128
 
@@ -456,6 +457,7 @@ int __init acpi_table_init(void)
        status = acpi_initialize_tables(initial_tables, ACPI_MAX_TABLES, 0);
        if (ACPI_FAILURE(status))
                return -EINVAL;
+       acpi_initrd_initialize_tables();
 
        check_multiple_madt();
        return 0;
index c359351d50f1c99e9758b74ea84d97e76b402b0f..a163f2c59aa36571359e75661d52cfdc55d4ef3b 100644 (file)
@@ -218,7 +218,7 @@ bool fwnode_property_present(struct fwnode_handle *fwnode, const char *propname)
        bool ret;
 
        ret = __fwnode_property_present(fwnode, propname);
-       if (ret == false && fwnode && fwnode->secondary)
+       if (ret == false && fwnode && !IS_ERR_OR_NULL(fwnode->secondary))
                ret = __fwnode_property_present(fwnode->secondary, propname);
        return ret;
 }
@@ -423,7 +423,7 @@ EXPORT_SYMBOL_GPL(device_property_match_string);
        int _ret_;                                                                      \
        _ret_ = FWNODE_PROP_READ(_fwnode_, _propname_, _type_, _proptype_,              \
                                 _val_, _nval_);                                        \
-       if (_ret_ == -EINVAL && _fwnode_ && _fwnode_->secondary)                        \
+       if (_ret_ == -EINVAL && _fwnode_ && !IS_ERR_OR_NULL(_fwnode_->secondary))       \
                _ret_ = FWNODE_PROP_READ(_fwnode_->secondary, _propname_, _type_,       \
                                _proptype_, _val_, _nval_);                             \
        _ret_;                                                                          \
@@ -593,7 +593,7 @@ int fwnode_property_read_string_array(struct fwnode_handle *fwnode,
        int ret;
 
        ret = __fwnode_property_read_string_array(fwnode, propname, val, nval);
-       if (ret == -EINVAL && fwnode && fwnode->secondary)
+       if (ret == -EINVAL && fwnode && !IS_ERR_OR_NULL(fwnode->secondary))
                ret = __fwnode_property_read_string_array(fwnode->secondary,
                                                          propname, val, nval);
        return ret;
@@ -621,7 +621,7 @@ int fwnode_property_read_string(struct fwnode_handle *fwnode,
        int ret;
 
        ret = __fwnode_property_read_string(fwnode, propname, val);
-       if (ret == -EINVAL && fwnode && fwnode->secondary)
+       if (ret == -EINVAL && fwnode && !IS_ERR_OR_NULL(fwnode->secondary))
                ret = __fwnode_property_read_string(fwnode->secondary,
                                                    propname, val);
        return ret;
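Editorial note: each lookup above now falls back to fwnode->secondary only when that pointer is a real node rather than NULL or an ERR_PTR-encoded error. A small userspace-style sketch of the same primary/secondary fallback guard; the node type, the err_ptr()/is_err_or_null() helpers and the read_value() callback are illustrative stand-ins, not kernel API:

    #include <errno.h>
    #include <stdbool.h>
    #include <stddef.h>
    #include <stdio.h>

    struct node {
        struct node *secondary;  /* may be NULL or an encoded error pointer */
        int value;
        bool has_value;
    };

    /* Minimal stand-ins for the kernel's ERR_PTR()/IS_ERR_OR_NULL() idiom. */
    static inline void *err_ptr(long err) { return (void *)err; }
    static inline bool is_err_or_null(const void *p)
    {
        return !p || (unsigned long)p >= (unsigned long)-4095;
    }

    static int read_value(const struct node *n, int *out)
    {
        if (!n->has_value)
            return -EINVAL;
        *out = n->value;
        return 0;
    }

    /* Try the primary node first; consult the secondary only if it is valid. */
    static int read_value_with_fallback(const struct node *n, int *out)
    {
        int ret = read_value(n, out);

        if (ret == -EINVAL && !is_err_or_null(n->secondary))
            ret = read_value(n->secondary, out);
        return ret;
    }

    int main(void)
    {
        struct node secondary = { NULL, 42, true };
        struct node primary = { &secondary, 0, false };
        struct node broken = { err_ptr(-ENODEV), 0, false };
        int v;

        printf("fallback: %d\n",
               read_value_with_fallback(&primary, &v) ? -1 : v);
        printf("error secondary: %d\n", read_value_with_fallback(&broken, &v));
        return 0;
    }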
index 64f5d1bdbb48defdddc4c7ed7ee0c5b988b92e1b..8e304b1befc56385b8fc34d7697a529289d8ae3b 100644 (file)
 #define AT_XDMAC_MAX_CHAN      0x20
 #define AT_XDMAC_MAX_CSIZE     16      /* 16 data */
 #define AT_XDMAC_MAX_DWIDTH    8       /* 64 bits */
+#define AT_XDMAC_RESIDUE_MAX_RETRIES   5
 
 #define AT_XDMAC_DMA_BUSWIDTHS\
        (BIT(DMA_SLAVE_BUSWIDTH_UNDEFINED) |\
@@ -1395,8 +1396,8 @@ at_xdmac_tx_status(struct dma_chan *chan, dma_cookie_t cookie,
        struct at_xdmac_desc    *desc, *_desc;
        struct list_head        *descs_list;
        enum dma_status         ret;
-       int                     residue;
-       u32                     cur_nda, mask, value;
+       int                     residue, retry;
+       u32                     cur_nda, check_nda, cur_ubc, mask, value;
        u8                      dwidth = 0;
        unsigned long           flags;
 
@@ -1433,7 +1434,42 @@ at_xdmac_tx_status(struct dma_chan *chan, dma_cookie_t cookie,
                        cpu_relax();
        }
 
+       /*
+        * When processing the residue, we need to read two registers, but we
+        * cannot do so atomically. AT_XDMAC_CNDA tells us where we stand in
+        * the descriptor list and AT_XDMAC_CUBC tells us how much data is
+        * remaining for the current descriptor.
+        * Since the DMA channel is not paused (to avoid losing data), the
+        * descriptor may change between the AT_XDMAC_CNDA and AT_XDMAC_CUBC
+        * reads.
+        * For that reason, after reading AT_XDMAC_CUBC we check whether we
+        * are still on the same descriptor by reading AT_XDMAC_CNDA a second
+        * time. If AT_XDMAC_CNDA has changed, AT_XDMAC_CUBC must be read
+        * again.
+        * Memory barriers are used to enforce the read order of the
+        * registers. A maximum number of retries is set because, although
+        * unlikely, the loop could otherwise never end when transferring a
+        * lot of data with small buffers.
+        */
        cur_nda = at_xdmac_chan_read(atchan, AT_XDMAC_CNDA) & 0xfffffffc;
+       rmb();
+       cur_ubc = at_xdmac_chan_read(atchan, AT_XDMAC_CUBC);
+       for (retry = 0; retry < AT_XDMAC_RESIDUE_MAX_RETRIES; retry++) {
+               rmb();
+               check_nda = at_xdmac_chan_read(atchan, AT_XDMAC_CNDA) & 0xfffffffc;
+
+               if (likely(cur_nda == check_nda))
+                       break;
+
+               cur_nda = check_nda;
+               rmb();
+               cur_ubc = at_xdmac_chan_read(atchan, AT_XDMAC_CUBC);
+       }
+
+       if (unlikely(retry >= AT_XDMAC_RESIDUE_MAX_RETRIES)) {
+               ret = DMA_ERROR;
+               goto spin_unlock;
+       }
+
        /*
         * Remove size of all microblocks already transferred and the current
         * one. Then add the remaining size to transfer of the current
@@ -1446,7 +1482,7 @@ at_xdmac_tx_status(struct dma_chan *chan, dma_cookie_t cookie,
                if ((desc->lld.mbr_nda & 0xfffffffc) == cur_nda)
                        break;
        }
-       residue += at_xdmac_chan_read(atchan, AT_XDMAC_CUBC) << dwidth;
+       residue += cur_ubc << dwidth;
 
        dma_set_residue(txstate, residue);
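Editorial note: the comment and retry loop added above describe a common lock-free snapshot pattern: read register A, read register B, re-read A, and only trust B if A did not change in between, with a bounded retry count so the caller cannot spin forever. A simplified userspace sketch of that pattern using C11 atomics in place of the driver's MMIO reads and barriers (all names illustrative):

    #include <stdatomic.h>
    #include <stdint.h>
    #include <stdio.h>

    #define RESIDUE_MAX_RETRIES 5

    /* Two "registers" that a concurrent producer (the hardware, in the
     * driver's case) may update at any time. */
    static _Atomic uint32_t cur_desc;
    static _Atomic uint32_t cur_remaining;

    /* Take a consistent snapshot: read desc, read remaining, then re-read
     * desc.  If desc changed in between, the remaining count may belong to
     * another descriptor, so read it again.  Give up after a bounded number
     * of retries. */
    static int snapshot(uint32_t *desc, uint32_t *remaining)
    {
        uint32_t d, check, r;
        int retry;

        d = atomic_load(&cur_desc);
        r = atomic_load(&cur_remaining);
        for (retry = 0; retry < RESIDUE_MAX_RETRIES; retry++) {
            check = atomic_load(&cur_desc);
            if (check == d)
                break;
            d = check;
            r = atomic_load(&cur_remaining);
        }
        if (retry >= RESIDUE_MAX_RETRIES)
            return -1;      /* could not get a stable pair */

        *desc = d;
        *remaining = r;
        return 0;
    }

    int main(void)
    {
        uint32_t d, r;

        atomic_store(&cur_desc, 3);
        atomic_store(&cur_remaining, 128);
        if (!snapshot(&d, &r))
            printf("descriptor %u, %u units remaining\n", d, r);
        return 0;
    }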
 
index 2209f75fdf05bf29114f0fb3ffedff89f55b43f3..aac85c30c2cf6fc64669841c5b6abb5317df1b94 100644 (file)
@@ -522,6 +522,8 @@ static dma_cookie_t fsldma_run_tx_complete_actions(struct fsldma_chan *chan,
                        chan_dbg(chan, "LD %p callback\n", desc);
                        txd->callback(txd->callback_param);
                }
+
+               dma_descriptor_unmap(txd);
        }
 
        /* Run any dependencies */
index e438ee5b433f3f498cba20df890730a3fc77220c..f5c6b97c89588902c40b192109f7f0fb44da153a 100644 (file)
@@ -1574,7 +1574,7 @@ static int knl_get_dimm_capacity(struct sbridge_pvt *pvt, u64 *mc_sizes)
                                for (cha = 0; cha < KNL_MAX_CHAS; cha++) {
                                        if (knl_get_mc_route(target,
                                                mc_route_reg[cha]) == channel
-                                               && participants[channel]) {
+                                               && !participants[channel]) {
                                                participant_count++;
                                                participants[channel] = 1;
                                                break;
index 8297bc319369d6e4e4dd1579195ca1b6161a57d1..1846d65b72859284b31c536dc2fdc1a29cae0d9e 100644 (file)
@@ -96,7 +96,7 @@ static void amdgpu_flip_work_func(struct work_struct *__work)
         * In practice this won't execute very often unless on very fast
         * machines because the time window for this to happen is very small.
         */
-       while (amdgpuCrtc->enabled && repcnt--) {
+       while (amdgpuCrtc->enabled && --repcnt) {
                /* GET_DISTANCE_TO_VBLANKSTART returns distance to real vblank
                 * start in hpos, and to the "fudged earlier" vblank start in
                 * vpos.
@@ -112,13 +112,13 @@ static void amdgpu_flip_work_func(struct work_struct *__work)
                        break;
 
                /* Sleep at least until estimated real start of hw vblank */
-               spin_unlock_irqrestore(&crtc->dev->event_lock, flags);
                min_udelay = (-hpos + 1) * max(vblank->linedur_ns / 1000, 5);
                if (min_udelay > vblank->framedur_ns / 2000) {
                        /* Don't wait ridiculously long - something is wrong */
                        repcnt = 0;
                        break;
                }
+               spin_unlock_irqrestore(&crtc->dev->event_lock, flags);
                usleep_range(min_udelay, 2 * min_udelay);
                spin_lock_irqsave(&crtc->dev->event_lock, flags);
        };
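Editorial note: switching from repcnt-- to --repcnt presumably keeps the unsigned counter from being decremented on the final, failing test and wrapping past zero, which would make any later check of repcnt misleading. A tiny standalone demonstration of the difference between the two forms:

    #include <stdio.h>

    int main(void)
    {
        unsigned int post = 3, pre = 3;

        /* Post-decrement tests the old value, then decrements even on the
         * final, failing test - the counter ends up wrapped past zero. */
        while (post--)
            ;

        /* Pre-decrement stops as soon as the counter reaches zero and never
         * decrements past it. */
        while (--pre)
            ;

        printf("post-decrement leaves %u, pre-decrement leaves %u\n",
               post, pre);  /* typically 4294967295 and 0 */
        return 0;
    }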
index 21aacc1f45c1fd7afded7f6088c8a365e9005224..bf731e9f643e9ecddd367034179fae6a40d294ce 100644 (file)
@@ -265,15 +265,27 @@ static int amdgpu_atombios_dp_get_dp_link_config(struct drm_connector *connector
        unsigned max_lane_num = drm_dp_max_lane_count(dpcd);
        unsigned lane_num, i, max_pix_clock;
 
-       for (lane_num = 1; lane_num <= max_lane_num; lane_num <<= 1) {
-               for (i = 0; i < ARRAY_SIZE(link_rates) && link_rates[i] <= max_link_rate; i++) {
-                       max_pix_clock = (lane_num * link_rates[i] * 8) / bpp;
+       if (amdgpu_connector_encoder_get_dp_bridge_encoder_id(connector) ==
+           ENCODER_OBJECT_ID_NUTMEG) {
+               for (lane_num = 1; lane_num <= max_lane_num; lane_num <<= 1) {
+                       max_pix_clock = (lane_num * 270000 * 8) / bpp;
                        if (max_pix_clock >= pix_clock) {
                                *dp_lanes = lane_num;
-                               *dp_rate = link_rates[i];
+                               *dp_rate = 270000;
                                return 0;
                        }
                }
+       } else {
+               for (lane_num = 1; lane_num <= max_lane_num; lane_num <<= 1) {
+                       for (i = 0; i < ARRAY_SIZE(link_rates) && link_rates[i] <= max_link_rate; i++) {
+                               max_pix_clock = (lane_num * link_rates[i] * 8) / bpp;
+                               if (max_pix_clock >= pix_clock) {
+                                       *dp_lanes = lane_num;
+                                       *dp_rate = link_rates[i];
+                                       return 0;
+                               }
+                       }
+               }
        }
 
        return -EINVAL;
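Editorial note: the non-bridge branch keeps the original search: for each supported lane count and each link rate up to the sink's maximum, compute the largest pixel clock that configuration can carry and take the first one that satisfies the request. A self-contained sketch of that selection loop with hard-coded example rates (names and values are illustrative; rates are in kHz as in the DRM helpers):

    #include <stdio.h>

    /* Pick the first (lane count, link rate) pair able to carry pix_clock.
     * Link rates are in kHz; bpp is bits per pixel.  Returns 0 on success. */
    static int pick_dp_config(unsigned int max_lanes, unsigned int max_rate,
                              unsigned int pix_clock, unsigned int bpp,
                              unsigned int *out_lanes, unsigned int *out_rate)
    {
        static const unsigned int rates[] = { 162000, 270000, 540000 };
        unsigned int lanes, i, max_pix_clock;

        for (lanes = 1; lanes <= max_lanes; lanes <<= 1) {
            for (i = 0; i < sizeof(rates) / sizeof(rates[0]) &&
                        rates[i] <= max_rate; i++) {
                /* 8 data bits per 8b/10b symbol on each lane */
                max_pix_clock = (lanes * rates[i] * 8) / bpp;
                if (max_pix_clock >= pix_clock) {
                    *out_lanes = lanes;
                    *out_rate = rates[i];
                    return 0;
                }
            }
        }
        return -1;
    }

    int main(void)
    {
        unsigned int lanes, rate;

        /* 1920x1080@60 is roughly a 148500 kHz pixel clock at 24 bpp. */
        if (!pick_dp_config(4, 270000, 148500, 24, &lanes, &rate))
            printf("%u lane(s) at %u kHz\n", lanes, rate);
        return 0;
    }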
index 34e38749a8176f87001e54cb4ea8a7cc86fa744e..f8ee740c0e264d488b937503745ac9de06bbc9b1 100644 (file)
@@ -1382,8 +1382,16 @@ static void tda998x_connector_destroy(struct drm_connector *connector)
        drm_connector_cleanup(connector);
 }
 
+static int tda998x_connector_dpms(struct drm_connector *connector, int mode)
+{
+       if (drm_core_check_feature(connector->dev, DRIVER_ATOMIC))
+               return drm_atomic_helper_connector_dpms(connector, mode);
+       else
+               return drm_helper_connector_dpms(connector, mode);
+}
+
 static const struct drm_connector_funcs tda998x_connector_funcs = {
-       .dpms = drm_atomic_helper_connector_dpms,
+       .dpms = tda998x_connector_dpms,
        .reset = drm_atomic_helper_connector_reset,
        .fill_modes = drm_helper_probe_single_connector_modes,
        .detect = tda998x_connector_detect,
index 31f6d212fb1bbbfa1ce64efe8cebde2a5f9f575d..30f921421b0c944217832ba86856a6904f8fef11 100644 (file)
@@ -527,6 +527,8 @@ void intel_audio_codec_enable(struct intel_encoder *intel_encoder)
 
        mutex_lock(&dev_priv->av_mutex);
        intel_dig_port->audio_connector = connector;
+       /* referenced in the audio callbacks */
+       dev_priv->dig_port_map[port] = intel_encoder;
        mutex_unlock(&dev_priv->av_mutex);
 
        if (acomp && acomp->audio_ops && acomp->audio_ops->pin_eld_notify)
@@ -554,6 +556,7 @@ void intel_audio_codec_disable(struct intel_encoder *intel_encoder)
 
        mutex_lock(&dev_priv->av_mutex);
        intel_dig_port->audio_connector = NULL;
+       dev_priv->dig_port_map[port] = NULL;
        mutex_unlock(&dev_priv->av_mutex);
 
        if (acomp && acomp->audio_ops && acomp->audio_ops->pin_eld_notify)
index 0f3df2c39f7cdd3a69d8f0044cbd725e3b9bac6d..084d5586585d012891423067703fde0f9daec0b1 100644 (file)
@@ -3358,7 +3358,6 @@ void intel_ddi_init(struct drm_device *dev, enum port port)
        intel_encoder->get_config = intel_ddi_get_config;
 
        intel_dig_port->port = port;
-       dev_priv->dig_port_map[port] = intel_encoder;
        intel_dig_port->saved_port_bits = I915_READ(DDI_BUF_CTL(port)) &
                                          (DDI_BUF_PORT_REVERSAL |
                                           DDI_A_4_LANES);
index 1d8de43bed56839fd1fee3a75b6d61b6221c7c0a..cdc2c15873dcc71189ed3263aad55e3c63493ed3 100644 (file)
@@ -6045,7 +6045,6 @@ intel_dp_init(struct drm_device *dev,
        }
 
        intel_dig_port->port = port;
-       dev_priv->dig_port_map[port] = intel_encoder;
        intel_dig_port->dp.output_reg = output_reg;
 
        intel_encoder->type = INTEL_OUTPUT_DISPLAYPORT;
index cb5d1b15755c3b19a496105c7a8df99982e83cb9..616108c4bc3e5741f59c9a1066aa2df3b63d5fc5 100644 (file)
@@ -2154,7 +2154,6 @@ void intel_hdmi_init_connector(struct intel_digital_port *intel_dig_port,
 void intel_hdmi_init(struct drm_device *dev,
                     i915_reg_t hdmi_reg, enum port port)
 {
-       struct drm_i915_private *dev_priv = dev->dev_private;
        struct intel_digital_port *intel_dig_port;
        struct intel_encoder *intel_encoder;
        struct intel_connector *intel_connector;
@@ -2223,7 +2222,6 @@ void intel_hdmi_init(struct drm_device *dev,
                intel_encoder->cloneable |= 1 << INTEL_OUTPUT_HDMI;
 
        intel_dig_port->port = port;
-       dev_priv->dig_port_map[port] = intel_encoder;
        intel_dig_port->hdmi.hdmi_reg = hdmi_reg;
        intel_dig_port->dp.output_reg = INVALID_MMIO_REG;
 
index deb8282c26d83f952473ae145c4fef0b3112b9f1..52fbe530fc9eac207093271a05d44dbf3515e11c 100644 (file)
@@ -664,6 +664,12 @@ int intel_setup_gmbus(struct drm_device *dev)
 
                bus->adapter.algo = &gmbus_algorithm;
 
+               /*
+                * We wish to retry with bit banging
+                * after a timed-out GMBUS attempt.
+                */
+               bus->adapter.retries = 1;
+
                /* By default use a conservative clock rate */
                bus->reg0 = pin | GMBUS_RATE_100KHZ;
 
index 30a57185bdb4e41e7ff26a0d4af36577a218b0ea..287226311413c7036cecb02e973e6109d0b26548 100644 (file)
@@ -64,6 +64,7 @@ static void ipu_fb_enable(struct ipu_crtc *ipu_crtc)
        /* Start DC channel and DI after IDMAC */
        ipu_dc_enable_channel(ipu_crtc->dc);
        ipu_di_enable(ipu_crtc->di);
+       drm_crtc_vblank_on(&ipu_crtc->base);
 
        ipu_crtc->enabled = 1;
 }
@@ -80,6 +81,7 @@ static void ipu_fb_disable(struct ipu_crtc *ipu_crtc)
        ipu_di_disable(ipu_crtc->di);
        ipu_plane_disable(ipu_crtc->plane[0]);
        ipu_dc_disable(ipu);
+       drm_crtc_vblank_off(&ipu_crtc->base);
 
        ipu_crtc->enabled = 0;
 }
index 591ba2f1ae03674224b4719660d547ad4e64c0b1..26bb1b626fe3d817812e18e19e0bfb64a4247c3c 100644 (file)
@@ -42,6 +42,7 @@ static const uint32_t ipu_plane_formats[] = {
        DRM_FORMAT_YVYU,
        DRM_FORMAT_YUV420,
        DRM_FORMAT_YVU420,
+       DRM_FORMAT_RGB565,
 };
 
 int ipu_plane_irq(struct ipu_plane *ipu_plane)
index 44ee72e04df9e953bafe64cdfdadf2f01c1f9bce..6af832545bc5b76b44e836553e72005f01012bbe 100644 (file)
@@ -315,15 +315,27 @@ int radeon_dp_get_dp_link_config(struct drm_connector *connector,
        unsigned max_lane_num = drm_dp_max_lane_count(dpcd);
        unsigned lane_num, i, max_pix_clock;
 
-       for (lane_num = 1; lane_num <= max_lane_num; lane_num <<= 1) {
-               for (i = 0; i < ARRAY_SIZE(link_rates) && link_rates[i] <= max_link_rate; i++) {
-                       max_pix_clock = (lane_num * link_rates[i] * 8) / bpp;
+       if (radeon_connector_encoder_get_dp_bridge_encoder_id(connector) ==
+           ENCODER_OBJECT_ID_NUTMEG) {
+               for (lane_num = 1; lane_num <= max_lane_num; lane_num <<= 1) {
+                       max_pix_clock = (lane_num * 270000 * 8) / bpp;
                        if (max_pix_clock >= pix_clock) {
                                *dp_lanes = lane_num;
-                               *dp_rate = link_rates[i];
+                               *dp_rate = 270000;
                                return 0;
                        }
                }
+       } else {
+               for (lane_num = 1; lane_num <= max_lane_num; lane_num <<= 1) {
+                       for (i = 0; i < ARRAY_SIZE(link_rates) && link_rates[i] <= max_link_rate; i++) {
+                               max_pix_clock = (lane_num * link_rates[i] * 8) / bpp;
+                               if (max_pix_clock >= pix_clock) {
+                                       *dp_lanes = lane_num;
+                                       *dp_rate = link_rates[i];
+                                       return 0;
+                               }
+                       }
+               }
        }
 
        return -EINVAL;
index 902b59cebac584b075639a2b7d34f031907cc3d1..4197ca1bb1e4d3f5d1455cc7c282787778660e9c 100644 (file)
@@ -1744,7 +1744,6 @@ int radeon_resume_kms(struct drm_device *dev, bool resume, bool fbcon)
        }
 
        drm_kms_helper_poll_enable(dev);
-       drm_helper_hpd_irq_event(dev);
 
        /* set the power state here in case we are a PX system or headless */
        if ((rdev->pm.pm_method == PM_METHOD_DPM) && rdev->pm.dpm_enabled)
index 2b9ba03a7c1a84bc00564fe0f6391771977c2e7b..2d9196a447fdc94a49140dcad365b6d3957d43f6 100644 (file)
@@ -455,7 +455,7 @@ static void radeon_flip_work_func(struct work_struct *__work)
         * In practice this won't execute very often unless on very fast
         * machines because the time window for this to happen is very small.
         */
-       while (radeon_crtc->enabled && repcnt--) {
+       while (radeon_crtc->enabled && --repcnt) {
                /* GET_DISTANCE_TO_VBLANKSTART returns distance to real vblank
                 * start in hpos, and to the "fudged earlier" vblank start in
                 * vpos.
@@ -471,13 +471,13 @@ static void radeon_flip_work_func(struct work_struct *__work)
                        break;
 
                /* Sleep at least until estimated real start of hw vblank */
-               spin_unlock_irqrestore(&crtc->dev->event_lock, flags);
                min_udelay = (-hpos + 1) * max(vblank->linedur_ns / 1000, 5);
                if (min_udelay > vblank->framedur_ns / 2000) {
                        /* Don't wait ridiculously long - something is wrong */
                        repcnt = 0;
                        break;
                }
+               spin_unlock_irqrestore(&crtc->dev->event_lock, flags);
                usleep_range(min_udelay, 2 * min_udelay);
                spin_lock_irqsave(&crtc->dev->event_lock, flags);
        };
index 0f14d897baf9b32974a97eaa59153363e3d8c97b..7a98823bacd1cd5da33c9e11dc7f798ae1a866d9 100644 (file)
@@ -1079,6 +1079,8 @@ force:
 
        /* update display watermarks based on new power state */
        radeon_bandwidth_update(rdev);
+       /* update displays */
+       radeon_dpm_display_configuration_changed(rdev);
 
        /* wait for the rings to drain */
        for (i = 0; i < RADEON_NUM_RINGS; i++) {
@@ -1095,9 +1097,6 @@ force:
 
        radeon_dpm_post_set_power_state(rdev);
 
-       /* update displays */
-       radeon_dpm_display_configuration_changed(rdev);
-
        rdev->pm.dpm.current_active_crtcs = rdev->pm.dpm.new_active_crtcs;
        rdev->pm.dpm.current_active_crtc_count = rdev->pm.dpm.new_active_crtc_count;
        rdev->pm.dpm.single_display = single_display;
index db082bea8dafd023fa6ad19719f7fadf5ebfcad5..c5a1a08b0449004670b79947e68f0ba7fb8afb50 100644 (file)
@@ -563,6 +563,8 @@ static void vmw_sou_connector_destroy(struct drm_connector *connector)
 
 static const struct drm_connector_funcs vmw_sou_connector_funcs = {
        .dpms = vmw_du_connector_dpms,
+       .detect = vmw_du_connector_detect,
+       .fill_modes = vmw_du_connector_fill_modes,
        .set_property = vmw_du_connector_set_property,
        .destroy = vmw_sou_connector_destroy,
 };
index f2e13eb8339ffc1287110e247e0e54e922b7d72a..e00db3f510dd425c62565d913c937c5638a072d5 100644 (file)
@@ -1050,6 +1050,17 @@ static int ipu_add_client_devices(struct ipu_soc *ipu, unsigned long ipu_base)
        for (i = 0; i < ARRAY_SIZE(client_reg); i++) {
                const struct ipu_platform_reg *reg = &client_reg[i];
                struct platform_device *pdev;
+               struct device_node *of_node;
+
+               /* Associate subdevice with the corresponding port node */
+               of_node = of_graph_get_port_by_id(dev->of_node, i);
+               if (!of_node) {
+                       dev_info(dev,
+                                "no port@%d node in %s, not using %s%d\n",
+                                i, dev->of_node->full_name,
+                                (i / 2) ? "DI" : "CSI", i % 2);
+                       continue;
+               }
 
                pdev = platform_device_alloc(reg->name, id++);
                if (!pdev) {
@@ -1057,17 +1068,9 @@ static int ipu_add_client_devices(struct ipu_soc *ipu, unsigned long ipu_base)
                        goto err_register;
                }
 
+               pdev->dev.of_node = of_node;
                pdev->dev.parent = dev;
 
-               /* Associate subdevice with the corresponding port node */
-               pdev->dev.of_node = of_graph_get_port_by_id(dev->of_node, i);
-               if (!pdev->dev.of_node) {
-                       dev_err(dev, "missing port@%d node in %s\n", i,
-                               dev->of_node->full_name);
-                       ret = -ENODEV;
-                       goto err_register;
-               }
-
                ret = platform_device_add_data(pdev, &reg->pdata,
                                               sizeof(reg->pdata));
                if (!ret)
@@ -1289,10 +1292,6 @@ static int ipu_probe(struct platform_device *pdev)
        ipu->irq_sync = irq_sync;
        ipu->irq_err = irq_err;
 
-       ret = ipu_irq_init(ipu);
-       if (ret)
-               goto out_failed_irq;
-
        ret = device_reset(&pdev->dev);
        if (ret) {
                dev_err(&pdev->dev, "failed to reset: %d\n", ret);
@@ -1302,6 +1301,10 @@ static int ipu_probe(struct platform_device *pdev)
        if (ret)
                goto out_failed_reset;
 
+       ret = ipu_irq_init(ipu);
+       if (ret)
+               goto out_failed_irq;
+
        /* Set MCU_T to divide MCU access window into 2 */
        ipu_cm_write(ipu, 0x00400000L | (IPU_MCU_T_DEFAULT << 18),
                        IPU_DISP_GEN);
@@ -1324,9 +1327,9 @@ static int ipu_probe(struct platform_device *pdev)
 failed_add_clients:
        ipu_submodules_exit(ipu);
 failed_submodules_init:
-out_failed_reset:
        ipu_irq_exit(ipu);
 out_failed_irq:
+out_failed_reset:
        clk_disable_unprepare(ipu->clk);
        return ret;
 }
index 438f1b4964c08024c32e55e831f028f657db0e7d..d656657b805c20dafdfc28925c09179705789eae 100644 (file)
@@ -123,6 +123,7 @@ static const struct acpi_device_id dw_i2c_acpi_match[] = {
        { "80860F41", 0 },
        { "808622C1", 0 },
        { "AMD0010", ACCESS_INTR_MASK },
+       { "AMDI0010", ACCESS_INTR_MASK },
        { "AMDI0510", 0 },
        { "APMC0D0F", 0 },
        { }
index 8f779a1ec99c4b248c9721fceef19b579432539b..0ddf638d60f3645c4a083b37027625dfd850b97f 100644 (file)
@@ -63,6 +63,7 @@
 #include <linux/platform_device.h>
 #include <linux/mailbox_controller.h>
 #include <linux/mailbox_client.h>
+#include <linux/io-64-nonatomic-lo-hi.h>
 
 #include "mailbox.h"
 
@@ -70,6 +71,9 @@
 
 static struct mbox_chan *pcc_mbox_channels;
 
+/* Array of cached virtual address for doorbell registers */
+static void __iomem **pcc_doorbell_vaddr;
+
 static struct mbox_controller pcc_mbox_ctrl = {};
 /**
  * get_pcc_channel - Given a PCC subspace idx, get
@@ -160,6 +164,66 @@ void pcc_mbox_free_channel(struct mbox_chan *chan)
 }
 EXPORT_SYMBOL_GPL(pcc_mbox_free_channel);
 
+/*
+ * PCC can be used with performance-critical drivers such as CPPC,
+ * so it makes sense to locally cache the virtual address and use it
+ * to read from and write to PCC registers such as the doorbell register.
+ *
+ * read_register() and write_register() below are used to access such
+ * performance-critical registers.
+ */
+static int read_register(void __iomem *vaddr, u64 *val, unsigned int bit_width)
+{
+       int ret_val = 0;
+
+       switch (bit_width) {
+       case 8:
+               *val = readb(vaddr);
+               break;
+       case 16:
+               *val = readw(vaddr);
+               break;
+       case 32:
+               *val = readl(vaddr);
+               break;
+       case 64:
+               *val = readq(vaddr);
+               break;
+       default:
+               pr_debug("Error: Cannot read register of %u bit width",
+                       bit_width);
+               ret_val = -EFAULT;
+               break;
+       }
+       return ret_val;
+}
+
+static int write_register(void __iomem *vaddr, u64 val, unsigned int bit_width)
+{
+       int ret_val = 0;
+
+       switch (bit_width) {
+       case 8:
+               writeb(val, vaddr);
+               break;
+       case 16:
+               writew(val, vaddr);
+               break;
+       case 32:
+               writel(val, vaddr);
+               break;
+       case 64:
+               writeq(val, vaddr);
+               break;
+       default:
+               pr_debug("Error: Cannot write register of %u bit width",
+                       bit_width);
+               ret_val = -EFAULT;
+               break;
+       }
+       return ret_val;
+}
+
 /**
  * pcc_send_data - Called from Mailbox Controller code. Used
  *             here only to ring the channel doorbell. The PCC client
@@ -175,21 +239,39 @@ EXPORT_SYMBOL_GPL(pcc_mbox_free_channel);
 static int pcc_send_data(struct mbox_chan *chan, void *data)
 {
        struct acpi_pcct_hw_reduced *pcct_ss = chan->con_priv;
-       struct acpi_generic_address doorbell;
+       struct acpi_generic_address *doorbell;
        u64 doorbell_preserve;
        u64 doorbell_val;
        u64 doorbell_write;
+       u32 id = chan - pcc_mbox_channels;
+       int ret = 0;
+
+       if (id >= pcc_mbox_ctrl.num_chans) {
+               pr_debug("pcc_send_data: Invalid mbox_chan passed\n");
+               return -ENOENT;
+       }
 
-       doorbell = pcct_ss->doorbell_register;
+       doorbell = &pcct_ss->doorbell_register;
        doorbell_preserve = pcct_ss->preserve_mask;
        doorbell_write = pcct_ss->write_mask;
 
        /* Sync notification from OS to Platform. */
-       acpi_read(&doorbell_val, &doorbell);
-       acpi_write((doorbell_val & doorbell_preserve) | doorbell_write,
-                       &doorbell);
-
-       return 0;
+       if (pcc_doorbell_vaddr[id]) {
+               ret = read_register(pcc_doorbell_vaddr[id], &doorbell_val,
+                       doorbell->bit_width);
+               if (ret)
+                       return ret;
+               ret = write_register(pcc_doorbell_vaddr[id],
+                       (doorbell_val & doorbell_preserve) | doorbell_write,
+                       doorbell->bit_width);
+       } else {
+               ret = acpi_read(&doorbell_val, doorbell);
+               if (ret)
+                       return ret;
+               ret = acpi_write((doorbell_val & doorbell_preserve) | doorbell_write,
+                       doorbell);
+       }
+       return ret;
 }
 
 static const struct mbox_chan_ops pcc_chan_ops = {
@@ -265,14 +347,29 @@ static int __init acpi_pcc_probe(void)
                return -ENOMEM;
        }
 
+       pcc_doorbell_vaddr = kcalloc(count, sizeof(void *), GFP_KERNEL);
+       if (!pcc_doorbell_vaddr) {
+               kfree(pcc_mbox_channels);
+               return -ENOMEM;
+       }
+
        /* Point to the first PCC subspace entry */
        pcct_entry = (struct acpi_subtable_header *) (
                (unsigned long) pcct_tbl + sizeof(struct acpi_table_pcct));
 
        for (i = 0; i < count; i++) {
+               struct acpi_generic_address *db_reg;
+               struct acpi_pcct_hw_reduced *pcct_ss;
                pcc_mbox_channels[i].con_priv = pcct_entry;
                pcct_entry = (struct acpi_subtable_header *)
                        ((unsigned long) pcct_entry + pcct_entry->length);
+
+               /* If the doorbell is in system memory, cache its virtual address */
+               pcct_ss = (struct acpi_pcct_hw_reduced *)pcct_entry;
+               db_reg = &pcct_ss->doorbell_register;
+               if (db_reg->space_id == ACPI_ADR_SPACE_SYSTEM_MEMORY)
+                       pcc_doorbell_vaddr[i] = acpi_os_ioremap(db_reg->address,
+                                                       db_reg->bit_width/8);
        }
 
        pcc_mbox_ctrl.num_chans = count;
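Editorial note: read_register()/write_register() above exist so the cached doorbell mapping can be accessed with an access of the declared bit width instead of going back through acpi_read()/acpi_write(). A userspace analogue of the same width dispatch over an ordinary variable (illustrative only; real MMIO would use readb/readw/readl/readq on the __iomem pointer):

    #include <stdint.h>
    #include <stdio.h>
    #include <string.h>

    /* Read a value of the given bit width from an arbitrary address.
     * Returns 0 on success, -1 for an unsupported width. */
    static int read_reg(const void *addr, uint64_t *val, unsigned int bit_width)
    {
        switch (bit_width) {
        case 8:  { uint8_t  v; memcpy(&v, addr, sizeof(v)); *val = v; break; }
        case 16: { uint16_t v; memcpy(&v, addr, sizeof(v)); *val = v; break; }
        case 32: { uint32_t v; memcpy(&v, addr, sizeof(v)); *val = v; break; }
        case 64: { uint64_t v; memcpy(&v, addr, sizeof(v)); *val = v; break; }
        default:
            return -1;
        }
        return 0;
    }

    int main(void)
    {
        uint32_t doorbell = 0xdeadbeef;  /* stand-in for a mapped register */
        uint64_t val;

        if (!read_reg(&doorbell, &val, 32))
            printf("doorbell reads 0x%llx\n", (unsigned long long)val);
        return 0;
    }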
index 7dae0ac0f3aec12c5d44875111d3a69818e43c92..e9219f528d7e8ada4876649c28b9d0c54ce68117 100644 (file)
@@ -20,6 +20,9 @@
  * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
  */
 
+/* We need to access legacy defines from linux/media.h */
+#define __NEED_MEDIA_LEGACY_API
+
 #include <linux/compat.h>
 #include <linux/export.h>
 #include <linux/idr.h>
@@ -115,6 +118,26 @@ static long media_device_enum_entities(struct media_device *mdev,
        u_ent.group_id = 0;             /* Unused */
        u_ent.pads = ent->num_pads;
        u_ent.links = ent->num_links - ent->num_backlinks;
+
+       /*
+        * Workaround for a bug in media-ctl <= v1.10 that makes it do
+        * the wrong thing if the entity function doesn't belong to
+        * either the MEDIA_ENT_F_OLD_BASE or the MEDIA_ENT_F_OLD_SUBDEV_BASE
+        * range.
+        *
+        * Non-subdevices are expected to be in the MEDIA_ENT_F_OLD_BASE
+        * range; otherwise they will be silently ignored by media-ctl when
+        * printing the graphviz diagram. So, map them into the old devnode
+        * range.
+        */
+       if (ent->function < MEDIA_ENT_F_OLD_BASE ||
+           ent->function > MEDIA_ENT_T_DEVNODE_UNKNOWN) {
+               if (is_media_entity_v4l2_subdev(ent))
+                       u_ent.type = MEDIA_ENT_F_V4L2_SUBDEV_UNKNOWN;
+               else if (ent->function != MEDIA_ENT_F_IO_V4L)
+                       u_ent.type = MEDIA_ENT_T_DEVNODE_UNKNOWN;
+       }
+
        memcpy(&u_ent.raw, &ent->info, sizeof(ent->info));
        if (copy_to_user(uent, &u_ent, sizeof(u_ent)))
                return -EFAULT;
index 79316159eec63cbca36aa06d27bfaab561b62dc0..88b6c81cebbe68123da96775b3a42c3f96da4a11 100644 (file)
@@ -187,7 +187,7 @@ static int double_bit_error_detect(void *error_data, void *error_ecc,
        __nand_calculate_ecc(error_data, size, calc_ecc);
        ret = __nand_correct_data(error_data, error_ecc, calc_ecc, size);
 
-       return (ret == -1) ? 0 : -EINVAL;
+       return (ret == -EBADMSG) ? 0 : -EINVAL;
 }
 
 static const struct nand_ecc_test nand_ecc_test[] = {
index 575790e8a75af8e6184f5611fe133b1d73825679..74a7dfecee2783ac609b9fefbd32f7817b48c43c 100644 (file)
@@ -843,7 +843,7 @@ static irqreturn_t mcp251x_can_ist(int irq, void *dev_id)
                if (clear_intf)
                        mcp251x_write_bits(spi, CANINTF, clear_intf, 0x00);
 
-               if (eflag)
+               if (eflag & (EFLG_RX0OVR | EFLG_RX1OVR))
                        mcp251x_write_bits(spi, EFLG, eflag, 0x00);
 
                /* Update can state */
index 5eee62badf45457798c2fabe5005faa26c6286e2..cbc99d5649afa3877158a0ac37a50b8865a7e8ac 100644 (file)
@@ -826,9 +826,8 @@ static struct gs_can *gs_make_candev(unsigned int channel, struct usb_interface
 static void gs_destroy_candev(struct gs_can *dev)
 {
        unregister_candev(dev->netdev);
-       free_candev(dev->netdev);
        usb_kill_anchored_urbs(&dev->tx_submitted);
-       kfree(dev);
+       free_candev(dev->netdev);
 }
 
 static int gs_usb_probe(struct usb_interface *intf, const struct usb_device_id *id)
@@ -913,12 +912,15 @@ static int gs_usb_probe(struct usb_interface *intf, const struct usb_device_id *
        for (i = 0; i < icount; i++) {
                dev->canch[i] = gs_make_candev(i, intf);
                if (IS_ERR_OR_NULL(dev->canch[i])) {
+                       /* save error code to return later */
+                       rc = PTR_ERR(dev->canch[i]);
+
                        /* on failure destroy previously created candevs */
                        icount = i;
-                       for (i = 0; i < icount; i++) {
+                       for (i = 0; i < icount; i++)
                                gs_destroy_candev(dev->canch[i]);
-                               dev->canch[i] = NULL;
-                       }
+
+                       usb_kill_anchored_urbs(&dev->rx_submitted);
                        kfree(dev);
                        return rc;
                }
@@ -939,16 +941,12 @@ static void gs_usb_disconnect(struct usb_interface *intf)
                return;
        }
 
-       for (i = 0; i < GS_MAX_INTF; i++) {
-               struct gs_can *can = dev->canch[i];
-
-               if (!can)
-                       continue;
-
-               gs_destroy_candev(can);
-       }
+       for (i = 0; i < GS_MAX_INTF; i++)
+               if (dev->canch[i])
+                       gs_destroy_candev(dev->canch[i]);
 
        usb_kill_anchored_urbs(&dev->rx_submitted);
+       kfree(dev);
 }
 
 static const struct usb_device_id gs_usb_table[] = {
index 79e1a0282163db200916e0f862fa1560eab2fd03..17b2126075e01afde20ee2de1718dfc7db1e4641 100644 (file)
@@ -2461,7 +2461,7 @@ boomerang_interrupt(int irq, void *dev_id)
                                        int i;
                                        pci_unmap_single(VORTEX_PCI(vp),
                                                        le32_to_cpu(vp->tx_ring[entry].frag[0].addr),
-                                                       le32_to_cpu(vp->tx_ring[entry].frag[0].length),
+                                                       le32_to_cpu(vp->tx_ring[entry].frag[0].length)&0xFFF,
                                                        PCI_DMA_TODEVICE);
 
                                        for (i=1; i<=skb_shinfo(skb)->nr_frags; i++)
index 17472851674f19f949f97e04fccdc5a6fb874a3d..f749e4d389eb163132d5ab51ad7a9821c07577e4 100644 (file)
@@ -193,7 +193,6 @@ static void altera_tse_mdio_destroy(struct net_device *dev)
                            priv->mdio->id);
 
        mdiobus_unregister(priv->mdio);
-       kfree(priv->mdio->irq);
        mdiobus_free(priv->mdio);
        priv->mdio = NULL;
 }
index f71ab2647a3bc1f62f6b7450b34dd325c710549d..08a23e6b60e947894783f2115c2fe16abece84bc 100644 (file)
@@ -1460,7 +1460,19 @@ static int nb8800_probe(struct platform_device *pdev)
                goto err_disable_clk;
        }
 
-       priv->phy_node = of_parse_phandle(pdev->dev.of_node, "phy-handle", 0);
+       if (of_phy_is_fixed_link(pdev->dev.of_node)) {
+               ret = of_phy_register_fixed_link(pdev->dev.of_node);
+               if (ret < 0) {
+                       dev_err(&pdev->dev, "bad fixed-link spec\n");
+                       goto err_free_bus;
+               }
+               priv->phy_node = of_node_get(pdev->dev.of_node);
+       }
+
+       if (!priv->phy_node)
+               priv->phy_node = of_parse_phandle(pdev->dev.of_node,
+                                                 "phy-handle", 0);
+
        if (!priv->phy_node) {
                dev_err(&pdev->dev, "no PHY specified\n");
                ret = -ENODEV;
index 27aa0802d87d5f46d813184649dd9107fdeb541b..91874d24fd560c2d25afbf7ab7e3251774627a5b 100644 (file)
@@ -4896,9 +4896,9 @@ struct c2s_pri_trans_table_entry {
  * cfc delete event data
  */
 struct cfc_del_event_data {
-       u32 cid;
-       u32 reserved0;
-       u32 reserved1;
+       __le32 cid;
+       __le32 reserved0;
+       __le32 reserved1;
 };
 
 
@@ -5114,15 +5114,9 @@ struct vf_pf_channel_zone_trigger {
  * zone that triggers the in-bound interrupt
  */
 struct trigger_vf_zone {
-#if defined(__BIG_ENDIAN)
-       u16 reserved1;
-       u8 reserved0;
-       struct vf_pf_channel_zone_trigger vf_pf_channel;
-#elif defined(__LITTLE_ENDIAN)
        struct vf_pf_channel_zone_trigger vf_pf_channel;
        u8 reserved0;
        u16 reserved1;
-#endif
        u32 reserved2;
 };
 
@@ -5207,9 +5201,9 @@ struct e2_integ_data {
  * set mac event data
  */
 struct eth_event_data {
-       u32 echo;
-       u32 reserved0;
-       u32 reserved1;
+       __le32 echo;
+       __le32 reserved0;
+       __le32 reserved1;
 };
 
 
@@ -5219,9 +5213,9 @@ struct eth_event_data {
 struct vf_pf_event_data {
        u8 vf_id;
        u8 reserved0;
-       u16 reserved1;
-       u32 msg_addr_lo;
-       u32 msg_addr_hi;
+       __le16 reserved1;
+       __le32 msg_addr_lo;
+       __le32 msg_addr_hi;
 };
 
 /*
@@ -5230,9 +5224,9 @@ struct vf_pf_event_data {
 struct vf_flr_event_data {
        u8 vf_id;
        u8 reserved0;
-       u16 reserved1;
-       u32 reserved2;
-       u32 reserved3;
+       __le16 reserved1;
+       __le32 reserved2;
+       __le32 reserved3;
 };
 
 /*
@@ -5241,9 +5235,9 @@ struct vf_flr_event_data {
 struct malicious_vf_event_data {
        u8 vf_id;
        u8 err_id;
-       u16 reserved1;
-       u32 reserved2;
-       u32 reserved3;
+       __le16 reserved1;
+       __le32 reserved2;
+       __le32 reserved3;
 };
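Editorial note: annotating these firmware-filled fields as __le16/__le32 records that they arrive in little-endian byte order, and the handlers in the following hunks convert them with le32_to_cpu() before use, which is what keeps the code correct on big-endian hosts. Outside the kernel the same idea is a byte-order-independent decode of the wire value; a minimal sketch (buffer layout and names invented):

    #include <stdint.h>
    #include <stdio.h>

    /* Decode a 32-bit little-endian field from a wire buffer regardless of
     * the host's native byte order - a userspace analogue of le32_to_cpu(). */
    static uint32_t get_le32(const uint8_t *p)
    {
        return (uint32_t)p[0] | (uint32_t)p[1] << 8 |
               (uint32_t)p[2] << 16 | (uint32_t)p[3] << 24;
    }

    int main(void)
    {
        /* A firmware event carrying cid = 0x12345678 in little-endian order. */
        const uint8_t event[] = { 0x78, 0x56, 0x34, 0x12 };

        printf("cid = 0x%08x\n", get_le32(event));
        return 0;
    }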
 
 /*
index 6c4e3a69976fcaf4ecc0bafcebbcb2e725a40575..2bf9c871144f714c56a72e4e584aa4db431d218c 100644 (file)
@@ -5280,14 +5280,14 @@ static void bnx2x_handle_classification_eqe(struct bnx2x *bp,
 {
        unsigned long ramrod_flags = 0;
        int rc = 0;
-       u32 cid = elem->message.data.eth_event.echo & BNX2X_SWCID_MASK;
+       u32 echo = le32_to_cpu(elem->message.data.eth_event.echo);
+       u32 cid = echo & BNX2X_SWCID_MASK;
        struct bnx2x_vlan_mac_obj *vlan_mac_obj;
 
        /* Always push next commands out, don't wait here */
        __set_bit(RAMROD_CONT, &ramrod_flags);
 
-       switch (le32_to_cpu((__force __le32)elem->message.data.eth_event.echo)
-                           >> BNX2X_SWCID_SHIFT) {
+       switch (echo >> BNX2X_SWCID_SHIFT) {
        case BNX2X_FILTER_MAC_PENDING:
                DP(BNX2X_MSG_SP, "Got SETUP_MAC completions\n");
                if (CNIC_LOADED(bp) && (cid == BNX2X_ISCSI_ETH_CID(bp)))
@@ -5308,8 +5308,7 @@ static void bnx2x_handle_classification_eqe(struct bnx2x *bp,
                bnx2x_handle_mcast_eqe(bp);
                return;
        default:
-               BNX2X_ERR("Unsupported classification command: %d\n",
-                         elem->message.data.eth_event.echo);
+               BNX2X_ERR("Unsupported classification command: 0x%x\n", echo);
                return;
        }
 
@@ -5478,9 +5477,6 @@ static void bnx2x_eq_int(struct bnx2x *bp)
                        goto next_spqe;
                }
 
-               /* elem CID originates from FW; actually LE */
-               cid = SW_CID((__force __le32)
-                            elem->message.data.cfc_del_event.cid);
                opcode = elem->message.opcode;
 
                /* handle eq element */
@@ -5503,6 +5499,10 @@ static void bnx2x_eq_int(struct bnx2x *bp)
                         * we may want to verify here that the bp state is
                         * HALTING
                         */
+
+                       /* elem CID originates from FW; actually LE */
+                       cid = SW_CID(elem->message.data.cfc_del_event.cid);
+
                        DP(BNX2X_MSG_SP,
                           "got delete ramrod for MULTI[%d]\n", cid);
 
@@ -5596,10 +5596,8 @@ static void bnx2x_eq_int(struct bnx2x *bp)
                      BNX2X_STATE_OPENING_WAIT4_PORT):
                case (EVENT_RING_OPCODE_RSS_UPDATE_RULES |
                      BNX2X_STATE_CLOSING_WAIT4_HALT):
-                       cid = elem->message.data.eth_event.echo &
-                               BNX2X_SWCID_MASK;
                        DP(BNX2X_MSG_SP, "got RSS_UPDATE ramrod. CID %d\n",
-                          cid);
+                          SW_CID(elem->message.data.eth_event.echo));
                        rss_raw->clear_pending(rss_raw);
                        break;
 
@@ -5684,7 +5682,7 @@ static void bnx2x_sp_task(struct work_struct *work)
                if (status & BNX2X_DEF_SB_IDX) {
                        struct bnx2x_fastpath *fp = bnx2x_fcoe_fp(bp);
 
-               if (FCOE_INIT(bp) &&
+                       if (FCOE_INIT(bp) &&
                            (bnx2x_has_rx_work(fp) || bnx2x_has_tx_work(fp))) {
                                /* Prevent local bottom-halves from running as
                                 * we are going to change the local NAPI list.
index 9d027348cd09b90fcca14843f13e278d54dd89e6..632daff117d305a822f95cc1bcd84b47437b2334 100644 (file)
@@ -1672,11 +1672,12 @@ void bnx2x_vf_handle_classification_eqe(struct bnx2x *bp,
 {
        unsigned long ramrod_flags = 0;
        int rc = 0;
+       u32 echo = le32_to_cpu(elem->message.data.eth_event.echo);
 
        /* Always push next commands out, don't wait here */
        set_bit(RAMROD_CONT, &ramrod_flags);
 
-       switch (elem->message.data.eth_event.echo >> BNX2X_SWCID_SHIFT) {
+       switch (echo >> BNX2X_SWCID_SHIFT) {
        case BNX2X_FILTER_MAC_PENDING:
                rc = vfq->mac_obj.complete(bp, &vfq->mac_obj, elem,
                                           &ramrod_flags);
@@ -1686,8 +1687,7 @@ void bnx2x_vf_handle_classification_eqe(struct bnx2x *bp,
                                            &ramrod_flags);
                break;
        default:
-               BNX2X_ERR("Unsupported classification command: %d\n",
-                         elem->message.data.eth_event.echo);
+               BNX2X_ERR("Unsupported classification command: 0x%x\n", echo);
                return;
        }
        if (rc < 0)
@@ -1747,16 +1747,14 @@ int bnx2x_iov_eq_sp_event(struct bnx2x *bp, union event_ring_elem *elem)
 
        switch (opcode) {
        case EVENT_RING_OPCODE_CFC_DEL:
-               cid = SW_CID((__force __le32)
-                            elem->message.data.cfc_del_event.cid);
+               cid = SW_CID(elem->message.data.cfc_del_event.cid);
                DP(BNX2X_MSG_IOV, "checking cfc-del comp cid=%d\n", cid);
                break;
        case EVENT_RING_OPCODE_CLASSIFICATION_RULES:
        case EVENT_RING_OPCODE_MULTICAST_RULES:
        case EVENT_RING_OPCODE_FILTERS_RULES:
        case EVENT_RING_OPCODE_RSS_UPDATE_RULES:
-               cid = (elem->message.data.eth_event.echo &
-                      BNX2X_SWCID_MASK);
+               cid = SW_CID(elem->message.data.eth_event.echo);
                DP(BNX2X_MSG_IOV, "checking filtering comp cid=%d\n", cid);
                break;
        case EVENT_RING_OPCODE_VF_FLR:
index 1374e5394a7970ba20ad54ddfc6271ce09e40744..bfae300cf25ff881292dc36ad56e51e37132cd76 100644 (file)
@@ -2187,8 +2187,10 @@ void bnx2x_vf_mbx_schedule(struct bnx2x *bp,
 
        /* Update VFDB with current message and schedule its handling */
        mutex_lock(&BP_VFDB(bp)->event_mutex);
-       BP_VF_MBX(bp, vf_idx)->vf_addr_hi = vfpf_event->msg_addr_hi;
-       BP_VF_MBX(bp, vf_idx)->vf_addr_lo = vfpf_event->msg_addr_lo;
+       BP_VF_MBX(bp, vf_idx)->vf_addr_hi =
+               le32_to_cpu(vfpf_event->msg_addr_hi);
+       BP_VF_MBX(bp, vf_idx)->vf_addr_lo =
+               le32_to_cpu(vfpf_event->msg_addr_lo);
        BP_VFDB(bp)->event_occur |= (1ULL << vf_idx);
        mutex_unlock(&BP_VFDB(bp)->event_mutex);
 
index 8ab000dd52d958317323c8486fc0538576727803..82f191382989b04877177075ea1906f0e941dfb9 100644 (file)
@@ -248,7 +248,8 @@ static netdev_tx_t bnxt_start_xmit(struct sk_buff *skb, struct net_device *dev)
                tx_push1->tx_bd_cfa_meta = cpu_to_le32(vlan_tag_flags);
                tx_push1->tx_bd_cfa_action = cpu_to_le32(cfa_action);
 
-               end = PTR_ALIGN(pdata + length + 1, 8) - 1;
+               end = pdata + length;
+               end = PTR_ALIGN(end, 8) - 1;
                *end = 0;
 
                skb_copy_from_linear_data(skb, pdata, len);
index 04b0d16b210e89dd6eae3aa6704987e8543edeca..95bc470ae441af824f7b6ce29df5ff17842d7fee 100644 (file)
@@ -987,7 +987,7 @@ bna_rxf_ucast_cfg_apply(struct bna_rxf *rxf)
        if (!list_empty(&rxf->ucast_pending_add_q)) {
                mac = list_first_entry(&rxf->ucast_pending_add_q,
                                       struct bna_mac, qe);
-               list_add_tail(&mac->qe, &rxf->ucast_active_q);
+               list_move_tail(&mac->qe, &rxf->ucast_active_q);
                bna_bfi_ucast_req(rxf, mac, BFI_ENET_H2I_MAC_UCAST_ADD_REQ);
                return 1;
        }
index 688828865c48253d5f96e0bd4cab4c315a635e37..34e9acea87479db83b852c4e233c3e26b532a6a1 100644 (file)
 #define NIC_PF_INTR_ID_MBOX0           8
 #define NIC_PF_INTR_ID_MBOX1           9
 
+/* Minimum FIFO level before all packets for the CQ are dropped
+ *
+ * This value ensures that once a packet has been "accepted"
+ * for reception it will not get dropped due to the non-availability
+ * of a CQ descriptor. A hardware erratum mandates that this value
+ * be at least 0x100.
+ */
+#define NICPF_CQM_MIN_DROP_LEVEL       0x100
+
 /* Global timer for CQ timer thresh interrupts
  * Calculated for SCLK of 700Mhz
  * value written should be a 1/16th of what is expected
index 4dded90076c8783b95c7944131e6b2cd3b828e40..95f17f8cadacc08637c485e555242e78aafb48c5 100644 (file)
@@ -304,6 +304,7 @@ static void nic_set_lmac_vf_mapping(struct nicpf *nic)
 static void nic_init_hw(struct nicpf *nic)
 {
        int i;
+       u64 cqm_cfg;
 
        /* Enable NIC HW block */
        nic_reg_write(nic, NIC_PF_CFG, 0x3);
@@ -340,6 +341,11 @@ static void nic_init_hw(struct nicpf *nic)
        /* Enable VLAN ethertype matching and stripping */
        nic_reg_write(nic, NIC_PF_RX_ETYPE_0_7,
                      (2 << 19) | (ETYPE_ALG_VLAN_STRIP << 16) | ETH_P_8021Q);
+
+       /* Check if HW expected value is higher (could be in future chips) */
+       cqm_cfg = nic_reg_read(nic, NIC_PF_CQM_CFG);
+       if (cqm_cfg < NICPF_CQM_MIN_DROP_LEVEL)
+               nic_reg_write(nic, NIC_PF_CQM_CFG, NICPF_CQM_MIN_DROP_LEVEL);
 }
 
 /* Channel parse index configuration */
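Editorial note: the added check reads the current CQM drop level and only rewrites it when it is below the documented minimum, leaving a future chip that already programs a higher value untouched. A trivial standalone sketch of that raise-to-a-floor rule (names are illustrative):

    #include <stdint.h>
    #include <stdio.h>

    #define CQM_MIN_DROP_LEVEL 0x100

    /* Raise the configured drop level to the documented minimum if needed,
     * but never lower a value the hardware or firmware already set higher. */
    static uint64_t apply_min_drop_level(uint64_t current)
    {
        return current < CQM_MIN_DROP_LEVEL ? CQM_MIN_DROP_LEVEL : current;
    }

    int main(void)
    {
        printf("0x80  -> 0x%llx\n",
               (unsigned long long)apply_min_drop_level(0x80));
        printf("0x200 -> 0x%llx\n",
               (unsigned long long)apply_min_drop_level(0x200));
        return 0;
    }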
index dd536be20193119c3465cd772a4bff6d649e3319..afb10e326b4fc46043b4d3f4cddb7d17f1cc341c 100644 (file)
@@ -21,7 +21,7 @@
 #define   NIC_PF_TCP_TIMER                     (0x0060)
 #define   NIC_PF_BP_CFG                                (0x0080)
 #define   NIC_PF_RRM_CFG                       (0x0088)
-#define   NIC_PF_CQM_CF                                (0x00A0)
+#define   NIC_PF_CQM_CFG                       (0x00A0)
 #define   NIC_PF_CNM_CF                                (0x00A8)
 #define   NIC_PF_CNM_STATUS                    (0x00B0)
 #define   NIC_PF_CQ_AVG_CFG                    (0x00C0)
index cf837831304be2f4d7edaebf68880a44f96bd057..f9751294ece79e54eff07e20ad9122f585bda32a 100644 (file)
@@ -531,6 +531,7 @@ struct be_adapter {
 
        struct delayed_work be_err_detection_work;
        u8 err_flags;
+       bool pcicfg_mapped;     /* pcicfg obtained via pci_iomap() */
        u32 flags;
        u32 cmd_privileges;
        /* Ethtool knobs and info */
index 241819b36ca72ac6c133875f88d17f4359c0c5d4..6d9a8d78e8ad8413f075960ee45f0033e222861c 100644 (file)
@@ -622,10 +622,13 @@ enum be_if_flags {
                                         BE_IF_FLAGS_VLAN_PROMISCUOUS |\
                                         BE_IF_FLAGS_MCAST_PROMISCUOUS)
 
-#define BE_IF_EN_FLAGS (BE_IF_FLAGS_BROADCAST | BE_IF_FLAGS_PASS_L3L4_ERRORS |\
-                       BE_IF_FLAGS_MULTICAST | BE_IF_FLAGS_UNTAGGED)
+#define BE_IF_FILT_FLAGS_BASIC (BE_IF_FLAGS_BROADCAST | \
+                               BE_IF_FLAGS_PASS_L3L4_ERRORS | \
+                               BE_IF_FLAGS_UNTAGGED)
 
-#define BE_IF_ALL_FILT_FLAGS   (BE_IF_EN_FLAGS | BE_IF_FLAGS_ALL_PROMISCUOUS)
+#define BE_IF_ALL_FILT_FLAGS   (BE_IF_FILT_FLAGS_BASIC | \
+                                BE_IF_FLAGS_MULTICAST | \
+                                BE_IF_FLAGS_ALL_PROMISCUOUS)
 
 /* An RX interface is an object with one or more MAC addresses and
  * filtering capabilities. */
index f99de3657ce3b5f58b6f08f1f97f470bb11d7788..d1cf1274fc2f4de4b763e683b182e485e39452e6 100644 (file)
@@ -125,6 +125,11 @@ static const char * const ue_status_hi_desc[] = {
        "Unknown"
 };
 
+#define BE_VF_IF_EN_FLAGS      (BE_IF_FLAGS_UNTAGGED | \
+                                BE_IF_FLAGS_BROADCAST | \
+                                BE_IF_FLAGS_MULTICAST | \
+                                BE_IF_FLAGS_PASS_L3L4_ERRORS)
+
 static void be_queue_free(struct be_adapter *adapter, struct be_queue_info *q)
 {
        struct be_dma_mem *mem = &q->dma_mem;
@@ -3537,7 +3542,7 @@ static int be_enable_if_filters(struct be_adapter *adapter)
 {
        int status;
 
-       status = be_cmd_rx_filter(adapter, BE_IF_EN_FLAGS, ON);
+       status = be_cmd_rx_filter(adapter, BE_IF_FILT_FLAGS_BASIC, ON);
        if (status)
                return status;
 
@@ -3857,8 +3862,7 @@ static int be_vfs_if_create(struct be_adapter *adapter)
        int status;
 
        /* If a FW profile exists, then cap_flags are updated */
-       cap_flags = BE_IF_FLAGS_UNTAGGED | BE_IF_FLAGS_BROADCAST |
-                   BE_IF_FLAGS_MULTICAST | BE_IF_FLAGS_PASS_L3L4_ERRORS;
+       cap_flags = BE_VF_IF_EN_FLAGS;
 
        for_all_vfs(adapter, vf_cfg, vf) {
                if (!BE3_chip(adapter)) {
@@ -3874,10 +3878,8 @@ static int be_vfs_if_create(struct be_adapter *adapter)
                        }
                }
 
-               en_flags = cap_flags & (BE_IF_FLAGS_UNTAGGED |
-                                       BE_IF_FLAGS_BROADCAST |
-                                       BE_IF_FLAGS_MULTICAST |
-                                       BE_IF_FLAGS_PASS_L3L4_ERRORS);
+               /* PF should enable IF flags during proxy if_create call */
+               en_flags = cap_flags & BE_VF_IF_EN_FLAGS;
                status = be_cmd_if_create(adapter, cap_flags, en_flags,
                                          &vf_cfg->if_handle, vf + 1);
                if (status)
@@ -4968,6 +4970,8 @@ static void be_unmap_pci_bars(struct be_adapter *adapter)
                pci_iounmap(adapter->pdev, adapter->csr);
        if (adapter->db)
                pci_iounmap(adapter->pdev, adapter->db);
+       if (adapter->pcicfg && adapter->pcicfg_mapped)
+               pci_iounmap(adapter->pdev, adapter->pcicfg);
 }
 
 static int db_bar(struct be_adapter *adapter)
@@ -5019,8 +5023,10 @@ static int be_map_pci_bars(struct be_adapter *adapter)
                        if (!addr)
                                goto pci_map_err;
                        adapter->pcicfg = addr;
+                       adapter->pcicfg_mapped = true;
                } else {
                        adapter->pcicfg = adapter->db + SRIOV_VF_PCICFG_OFFSET;
+                       adapter->pcicfg_mapped = false;
                }
        }
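The be2net hunks above track whether adapter->pcicfg was obtained from pci_iomap() (and therefore must be pci_iounmap()'d) or merely aliases an offset inside the already-mapped doorbell BAR. A minimal sketch of that ownership-tracking idiom, with hypothetical names and a made-up offset:

#include <linux/pci.h>

struct bar_ctx {
	struct pci_dev *pdev;
	void __iomem *cfg;
	bool cfg_mapped;	/* true only if cfg came from our own pci_iomap() */
};

static int bar_ctx_map_cfg(struct bar_ctx *ctx, void __iomem *db, bool own_bar)
{
	if (own_bar) {
		ctx->cfg = pci_iomap(ctx->pdev, 0, 0);
		if (!ctx->cfg)
			return -ENOMEM;
		ctx->cfg_mapped = true;
	} else {
		/* alias into a BAR that somebody else mapped and will unmap */
		ctx->cfg = db + 0x1000;		/* hypothetical offset */
		ctx->cfg_mapped = false;
	}
	return 0;
}

static void bar_ctx_unmap_cfg(struct bar_ctx *ctx)
{
	/* only unmap what this code itself mapped */
	if (ctx->cfg && ctx->cfg_mapped)
		pci_iounmap(ctx->pdev, ctx->cfg);
}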
 
index 62fa136554ac29a49074e5f176b3fc9ac8d37042..41b01064510098c00a66d86bb49cf2f3fe02f9c3 100644 (file)
@@ -1265,7 +1265,6 @@ static int ethoc_remove(struct platform_device *pdev)
 
                if (priv->mdio) {
                        mdiobus_unregister(priv->mdio);
-                       kfree(priv->mdio->irq);
                        mdiobus_free(priv->mdio);
                }
                if (priv->clk)
index 623aa1c8ebc6ba3149fc529e04994496b2e7be18..79a210aaf0bbd69d86ce830a45909ae8ede07f83 100644 (file)
@@ -2791,6 +2791,8 @@ static struct fman *read_dts_node(struct platform_device *of_dev)
                goto fman_free;
        }
 
+       fman->dev = &of_dev->dev;
+
        return fman;
 
 fman_node_put:
@@ -2845,8 +2847,6 @@ static int fman_probe(struct platform_device *of_dev)
 
        dev_set_drvdata(dev, fman);
 
-       fman->dev = dev;
-
        dev_dbg(dev, "FMan%d probed\n", fman->dts_params.id);
 
        return 0;
index 2aa7b401cc3be29eb24a8fb6d1065bff6caa6e39..b9ecf197ad117754245a290964d7cd55d9be5d4e 100644 (file)
@@ -1111,8 +1111,10 @@ static void __gfar_detect_errata_85xx(struct gfar_private *priv)
 
        if ((SVR_SOC_VER(svr) == SVR_8548) && (SVR_REV(svr) == 0x20))
                priv->errata |= GFAR_ERRATA_12;
+       /* P2020/P2010 Rev 1; MPC8548 Rev 2 */
        if (((SVR_SOC_VER(svr) == SVR_P2020) && (SVR_REV(svr) < 0x20)) ||
-           ((SVR_SOC_VER(svr) == SVR_P2010) && (SVR_REV(svr) < 0x20)))
+           ((SVR_SOC_VER(svr) == SVR_P2010) && (SVR_REV(svr) < 0x20)) ||
+           ((SVR_SOC_VER(svr) == SVR_8548) && (SVR_REV(svr) < 0x31)))
                priv->errata |= GFAR_ERRATA_76; /* aka eTSEC 20 */
 }
 #endif
index 74beb1867230c9cf62a60a2843825316e6d9cdbc..4ccc032633c4e57576b316cca8f0486a3e8b4a98 100644 (file)
@@ -25,6 +25,7 @@ config HIX5HD2_GMAC
 
 config HIP04_ETH
        tristate "HISILICON P04 Ethernet support"
+       depends on HAS_IOMEM    # For MFD_SYSCON
        select MARVELL_PHY
        select MFD_SYSCON
        select HNS_MDIO
index a0070d0e740dae5b3de8f2b2a92a772854b08321..d4f92ed322d6e5c4ac10a3f8d208d94445760e27 100644 (file)
@@ -675,8 +675,12 @@ static int hns_ae_config_loopback(struct hnae_handle *handle,
 {
        int ret;
        struct hnae_vf_cb *vf_cb = hns_ae_get_vf_cb(handle);
+       struct hns_mac_cb *mac_cb = hns_get_mac_cb(handle);
 
        switch (loop) {
+       case MAC_INTERNALLOOP_PHY:
+               ret = 0;
+               break;
        case MAC_INTERNALLOOP_SERDES:
                ret = hns_mac_config_sds_loopback(vf_cb->mac_cb, en);
                break;
@@ -686,6 +690,10 @@ static int hns_ae_config_loopback(struct hnae_handle *handle,
        default:
                ret = -EINVAL;
        }
+
+       if (!ret)
+               hns_dsaf_set_inner_lb(mac_cb->dsaf_dev, mac_cb->mac_id, en);
+
        return ret;
 }
 
index 9439f04962e1d96bf480d8aedf6472c912e28813..38fc5be3870cc3258755e79d13ac501ffa0826e4 100644 (file)
@@ -230,6 +230,30 @@ static void hns_dsaf_mix_def_qid_cfg(struct dsaf_device *dsaf_dev)
        }
 }
 
+static void hns_dsaf_inner_qid_cfg(struct dsaf_device *dsaf_dev)
+{
+       u16 max_q_per_vf, max_vfn;
+       u32 q_id, q_num_per_port;
+       u32 mac_id;
+
+       if (AE_IS_VER1(dsaf_dev->dsaf_ver))
+               return;
+
+       hns_rcb_get_queue_mode(dsaf_dev->dsaf_mode,
+                              HNS_DSAF_COMM_SERVICE_NW_IDX,
+                              &max_vfn, &max_q_per_vf);
+       q_num_per_port = max_vfn * max_q_per_vf;
+
+       for (mac_id = 0, q_id = 0; mac_id < DSAF_SERVICE_NW_NUM; mac_id++) {
+               dsaf_set_dev_field(dsaf_dev,
+                                  DSAFV2_SERDES_LBK_0_REG + 4 * mac_id,
+                                  DSAFV2_SERDES_LBK_QID_M,
+                                  DSAFV2_SERDES_LBK_QID_S,
+                                  q_id);
+               q_id += q_num_per_port;
+       }
+}
+
 /**
  * hns_dsaf_sw_port_type_cfg - cfg sw type
  * @dsaf_id: dsa fabric id
@@ -691,6 +715,16 @@ void hns_dsaf_set_promisc_mode(struct dsaf_device *dsaf_dev, u32 en)
        dsaf_set_dev_bit(dsaf_dev, DSAF_CFG_0_REG, DSAF_CFG_MIX_MODE_S, !!en);
 }
 
+void hns_dsaf_set_inner_lb(struct dsaf_device *dsaf_dev, u32 mac_id, u32 en)
+{
+       if (AE_IS_VER1(dsaf_dev->dsaf_ver) ||
+           dsaf_dev->mac_cb[mac_id].mac_type == HNAE_PORT_DEBUG)
+               return;
+
+       dsaf_set_dev_bit(dsaf_dev, DSAFV2_SERDES_LBK_0_REG + 4 * mac_id,
+                        DSAFV2_SERDES_LBK_EN_B, !!en);
+}
+
 /**
  * hns_dsaf_tbl_stat_en - tbl
  * @dsaf_id: dsa fabric id
@@ -1022,6 +1056,9 @@ static void hns_dsaf_comm_init(struct dsaf_device *dsaf_dev)
        /* set promisc def queue id */
        hns_dsaf_mix_def_qid_cfg(dsaf_dev);
 
+       /* set inner loopback queue id */
+       hns_dsaf_inner_qid_cfg(dsaf_dev);
+
        /* in non switch mode, set all port to access mode */
        hns_dsaf_sw_port_type_cfg(dsaf_dev, DSAF_SW_PORT_TYPE_NON_VLAN);
 
index 40205b910f80e66a439c14b53e7f2c907b0fbeb1..5fea226efaf330f968b2942a55065c7fc5c01112 100644 (file)
@@ -417,5 +417,6 @@ void hns_dsaf_get_strings(int stringset, u8 *data, int port);
 void hns_dsaf_get_regs(struct dsaf_device *ddev, u32 port, void *data);
 int hns_dsaf_get_regs_count(void);
 void hns_dsaf_set_promisc_mode(struct dsaf_device *dsaf_dev, u32 en);
+void hns_dsaf_set_inner_lb(struct dsaf_device *dsaf_dev, u32 mac_id, u32 en);
 
 #endif /* __HNS_DSAF_MAIN_H__ */
index f0c4f9b09d5b0477365c862816bc8b8d4dd19b88..60d695daa471eb23d5284778300807f8b235e546 100644 (file)
 #define DSAF_XGE_INT_STS_0_REG         0x1C0
 #define DSAF_PPE_INT_STS_0_REG         0x1E0
 #define DSAF_ROCEE_INT_STS_0_REG       0x200
+#define DSAFV2_SERDES_LBK_0_REG         0x220
 #define DSAF_PPE_QID_CFG_0_REG         0x300
 #define DSAF_SW_PORT_TYPE_0_REG                0x320
 #define DSAF_STP_PORT_TYPE_0_REG       0x340
 #define PPEV2_CFG_RSS_TBL_4N3_S        24
 #define PPEV2_CFG_RSS_TBL_4N3_M        (((1UL << 5) - 1) << PPEV2_CFG_RSS_TBL_4N3_S)
 
+#define DSAFV2_SERDES_LBK_EN_B  8
+#define DSAFV2_SERDES_LBK_QID_S 0
+#define DSAFV2_SERDES_LBK_QID_M        (((1UL << 8) - 1) << DSAFV2_SERDES_LBK_QID_S)
+
 #define PPE_CNT_CLR_CE_B       0
 #define PPE_CNT_CLR_SNAP_EN_B  1
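DSAFV2_SERDES_LBK_QID_M/_S above describe an 8-bit queue-ID field, and DSAFV2_SERDES_LBK_EN_B a single enable bit, in the per-MAC SERDES loopback register. The driver's dsaf_set_dev_field()/dsaf_set_dev_bit() helpers used in the earlier hunks presumably perform a read-modify-write with such mask/shift pairs; a generic sketch of that shape (reg_set_field() is not a real helper of this driver):

#include <linux/io.h>

static void reg_set_field(void __iomem *base, u32 reg, u32 mask,
			  u32 shift, u32 val)
{
	u32 v = readl(base + reg);	/* read current register value */

	v &= ~mask;			/* clear the field */
	v |= (val << shift) & mask;	/* insert the new value */
	writel(v, base + reg);
}

/* e.g. program queue 12 as the loopback QID of MAC 1 (hypothetical call):
 * reg_set_field(base, DSAFV2_SERDES_LBK_0_REG + 4 * 1,
 *		 DSAFV2_SERDES_LBK_QID_M, DSAFV2_SERDES_LBK_QID_S, 12);
 */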
 
index 3df22840fcd15370393e7e6a5c331b72dd87f476..3c4a3bc31a89230c2453c115316306739e28c2a3 100644 (file)
@@ -295,8 +295,10 @@ static int __lb_setup(struct net_device *ndev,
 
        switch (loop) {
        case MAC_INTERNALLOOP_PHY:
-               if ((phy_dev) && (!phy_dev->is_c45))
+               if ((phy_dev) && (!phy_dev->is_c45)) {
                        ret = hns_nic_config_phy_loopback(phy_dev, 0x1);
+                       ret |= h->dev->ops->set_loopback(h, loop, 0x1);
+               }
                break;
        case MAC_INTERNALLOOP_MAC:
                if ((h->dev->ops->set_loopback) &&
@@ -376,6 +378,7 @@ static void __lb_other_process(struct hns_nic_ring_data *ring_data,
                               struct sk_buff *skb)
 {
        struct net_device *ndev;
+       struct hns_nic_priv *priv;
        struct hnae_ring *ring;
        struct netdev_queue *dev_queue;
        struct sk_buff *new_skb;
@@ -385,8 +388,17 @@ static void __lb_other_process(struct hns_nic_ring_data *ring_data,
        char buff[33]; /* 32B data and the last character '\0' */
 
        if (!ring_data) { /* Just for creating the test frame */
+               ndev = skb->dev;
+               priv = netdev_priv(ndev);
+
                frame_size = skb->len;
                memset(skb->data, 0xFF, frame_size);
+               if ((!AE_IS_VER1(priv->enet_ver)) &&
+                   (priv->ae_handle->port_type == HNAE_PORT_SERVICE)) {
+                       memcpy(skb->data, ndev->dev_addr, 6);
+                       skb->data[5] += 0x1f;
+               }
+
                frame_size &= ~1ul;
                memset(&skb->data[frame_size / 2], 0xAA, frame_size / 2 - 1);
                memset(&skb->data[frame_size / 2 + 10], 0xBE,
@@ -486,6 +498,7 @@ static int __lb_run_test(struct net_device *ndev,
 
        /* place data into test skb */
        (void)skb_put(skb, size);
+       skb->dev = ndev;
        __lb_other_process(NULL, skb);
        skb->queue_mapping = NIC_LB_TEST_RING_ID;
 
index 335417b4756b73d3d2d9f1558b10b2556443bee7..ebe60719e489cd1fbdc12248ee48eed6d0979fd4 100644 (file)
@@ -1166,7 +1166,10 @@ map_failed:
        if (!firmware_has_feature(FW_FEATURE_CMO))
                netdev_err(netdev, "tx: unable to map xmit buffer\n");
        adapter->tx_map_failed++;
-       skb_linearize(skb);
+       if (skb_linearize(skb)) {
+               netdev->stats.tx_dropped++;
+               goto out;
+       }
        force_bounce = 1;
        goto retry_bounce;
 }
index 7d65708437238b48f5c5cdfcb1d901b202e2d547..6e9e16eee5d0eff7d74a2eff4a8a4bb91368afcd 100644 (file)
@@ -1348,44 +1348,44 @@ static void init_sub_crqs(struct ibmvnic_adapter *adapter, int retry)
        crq.request_capability.cmd = REQUEST_CAPABILITY;
 
        crq.request_capability.capability = cpu_to_be16(REQ_TX_QUEUES);
-       crq.request_capability.number = cpu_to_be32(adapter->req_tx_queues);
+       crq.request_capability.number = cpu_to_be64(adapter->req_tx_queues);
        ibmvnic_send_crq(adapter, &crq);
 
        crq.request_capability.capability = cpu_to_be16(REQ_RX_QUEUES);
-       crq.request_capability.number = cpu_to_be32(adapter->req_rx_queues);
+       crq.request_capability.number = cpu_to_be64(adapter->req_rx_queues);
        ibmvnic_send_crq(adapter, &crq);
 
        crq.request_capability.capability = cpu_to_be16(REQ_RX_ADD_QUEUES);
-       crq.request_capability.number = cpu_to_be32(adapter->req_rx_add_queues);
+       crq.request_capability.number = cpu_to_be64(adapter->req_rx_add_queues);
        ibmvnic_send_crq(adapter, &crq);
 
        crq.request_capability.capability =
            cpu_to_be16(REQ_TX_ENTRIES_PER_SUBCRQ);
        crq.request_capability.number =
-           cpu_to_be32(adapter->req_tx_entries_per_subcrq);
+           cpu_to_be64(adapter->req_tx_entries_per_subcrq);
        ibmvnic_send_crq(adapter, &crq);
 
        crq.request_capability.capability =
            cpu_to_be16(REQ_RX_ADD_ENTRIES_PER_SUBCRQ);
        crq.request_capability.number =
-           cpu_to_be32(adapter->req_rx_add_entries_per_subcrq);
+           cpu_to_be64(adapter->req_rx_add_entries_per_subcrq);
        ibmvnic_send_crq(adapter, &crq);
 
        crq.request_capability.capability = cpu_to_be16(REQ_MTU);
-       crq.request_capability.number = cpu_to_be32(adapter->req_mtu);
+       crq.request_capability.number = cpu_to_be64(adapter->req_mtu);
        ibmvnic_send_crq(adapter, &crq);
 
        if (adapter->netdev->flags & IFF_PROMISC) {
                if (adapter->promisc_supported) {
                        crq.request_capability.capability =
                            cpu_to_be16(PROMISC_REQUESTED);
-                       crq.request_capability.number = cpu_to_be32(1);
+                       crq.request_capability.number = cpu_to_be64(1);
                        ibmvnic_send_crq(adapter, &crq);
                }
        } else {
                crq.request_capability.capability =
                    cpu_to_be16(PROMISC_REQUESTED);
-               crq.request_capability.number = cpu_to_be32(0);
+               crq.request_capability.number = cpu_to_be64(0);
                ibmvnic_send_crq(adapter, &crq);
        }
 
@@ -2312,93 +2312,93 @@ static void handle_query_cap_rsp(union ibmvnic_crq *crq,
        switch (be16_to_cpu(crq->query_capability.capability)) {
        case MIN_TX_QUEUES:
                adapter->min_tx_queues =
-                   be32_to_cpu(crq->query_capability.number);
+                   be64_to_cpu(crq->query_capability.number);
                netdev_dbg(netdev, "min_tx_queues = %lld\n",
                           adapter->min_tx_queues);
                break;
        case MIN_RX_QUEUES:
                adapter->min_rx_queues =
-                   be32_to_cpu(crq->query_capability.number);
+                   be64_to_cpu(crq->query_capability.number);
                netdev_dbg(netdev, "min_rx_queues = %lld\n",
                           adapter->min_rx_queues);
                break;
        case MIN_RX_ADD_QUEUES:
                adapter->min_rx_add_queues =
-                   be32_to_cpu(crq->query_capability.number);
+                   be64_to_cpu(crq->query_capability.number);
                netdev_dbg(netdev, "min_rx_add_queues = %lld\n",
                           adapter->min_rx_add_queues);
                break;
        case MAX_TX_QUEUES:
                adapter->max_tx_queues =
-                   be32_to_cpu(crq->query_capability.number);
+                   be64_to_cpu(crq->query_capability.number);
                netdev_dbg(netdev, "max_tx_queues = %lld\n",
                           adapter->max_tx_queues);
                break;
        case MAX_RX_QUEUES:
                adapter->max_rx_queues =
-                   be32_to_cpu(crq->query_capability.number);
+                   be64_to_cpu(crq->query_capability.number);
                netdev_dbg(netdev, "max_rx_queues = %lld\n",
                           adapter->max_rx_queues);
                break;
        case MAX_RX_ADD_QUEUES:
                adapter->max_rx_add_queues =
-                   be32_to_cpu(crq->query_capability.number);
+                   be64_to_cpu(crq->query_capability.number);
                netdev_dbg(netdev, "max_rx_add_queues = %lld\n",
                           adapter->max_rx_add_queues);
                break;
        case MIN_TX_ENTRIES_PER_SUBCRQ:
                adapter->min_tx_entries_per_subcrq =
-                   be32_to_cpu(crq->query_capability.number);
+                   be64_to_cpu(crq->query_capability.number);
                netdev_dbg(netdev, "min_tx_entries_per_subcrq = %lld\n",
                           adapter->min_tx_entries_per_subcrq);
                break;
        case MIN_RX_ADD_ENTRIES_PER_SUBCRQ:
                adapter->min_rx_add_entries_per_subcrq =
-                   be32_to_cpu(crq->query_capability.number);
+                   be64_to_cpu(crq->query_capability.number);
                netdev_dbg(netdev, "min_rx_add_entrs_per_subcrq = %lld\n",
                           adapter->min_rx_add_entries_per_subcrq);
                break;
        case MAX_TX_ENTRIES_PER_SUBCRQ:
                adapter->max_tx_entries_per_subcrq =
-                   be32_to_cpu(crq->query_capability.number);
+                   be64_to_cpu(crq->query_capability.number);
                netdev_dbg(netdev, "max_tx_entries_per_subcrq = %lld\n",
                           adapter->max_tx_entries_per_subcrq);
                break;
        case MAX_RX_ADD_ENTRIES_PER_SUBCRQ:
                adapter->max_rx_add_entries_per_subcrq =
-                   be32_to_cpu(crq->query_capability.number);
+                   be64_to_cpu(crq->query_capability.number);
                netdev_dbg(netdev, "max_rx_add_entrs_per_subcrq = %lld\n",
                           adapter->max_rx_add_entries_per_subcrq);
                break;
        case TCP_IP_OFFLOAD:
                adapter->tcp_ip_offload =
-                   be32_to_cpu(crq->query_capability.number);
+                   be64_to_cpu(crq->query_capability.number);
                netdev_dbg(netdev, "tcp_ip_offload = %lld\n",
                           adapter->tcp_ip_offload);
                break;
        case PROMISC_SUPPORTED:
                adapter->promisc_supported =
-                   be32_to_cpu(crq->query_capability.number);
+                   be64_to_cpu(crq->query_capability.number);
                netdev_dbg(netdev, "promisc_supported = %lld\n",
                           adapter->promisc_supported);
                break;
        case MIN_MTU:
-               adapter->min_mtu = be32_to_cpu(crq->query_capability.number);
+               adapter->min_mtu = be64_to_cpu(crq->query_capability.number);
                netdev_dbg(netdev, "min_mtu = %lld\n", adapter->min_mtu);
                break;
        case MAX_MTU:
-               adapter->max_mtu = be32_to_cpu(crq->query_capability.number);
+               adapter->max_mtu = be64_to_cpu(crq->query_capability.number);
                netdev_dbg(netdev, "max_mtu = %lld\n", adapter->max_mtu);
                break;
        case MAX_MULTICAST_FILTERS:
                adapter->max_multicast_filters =
-                   be32_to_cpu(crq->query_capability.number);
+                   be64_to_cpu(crq->query_capability.number);
                netdev_dbg(netdev, "max_multicast_filters = %lld\n",
                           adapter->max_multicast_filters);
                break;
        case VLAN_HEADER_INSERTION:
                adapter->vlan_header_insertion =
-                   be32_to_cpu(crq->query_capability.number);
+                   be64_to_cpu(crq->query_capability.number);
                if (adapter->vlan_header_insertion)
                        netdev->features |= NETIF_F_HW_VLAN_STAG_TX;
                netdev_dbg(netdev, "vlan_header_insertion = %lld\n",
@@ -2406,43 +2406,43 @@ static void handle_query_cap_rsp(union ibmvnic_crq *crq,
                break;
        case MAX_TX_SG_ENTRIES:
                adapter->max_tx_sg_entries =
-                   be32_to_cpu(crq->query_capability.number);
+                   be64_to_cpu(crq->query_capability.number);
                netdev_dbg(netdev, "max_tx_sg_entries = %lld\n",
                           adapter->max_tx_sg_entries);
                break;
        case RX_SG_SUPPORTED:
                adapter->rx_sg_supported =
-                   be32_to_cpu(crq->query_capability.number);
+                   be64_to_cpu(crq->query_capability.number);
                netdev_dbg(netdev, "rx_sg_supported = %lld\n",
                           adapter->rx_sg_supported);
                break;
        case OPT_TX_COMP_SUB_QUEUES:
                adapter->opt_tx_comp_sub_queues =
-                   be32_to_cpu(crq->query_capability.number);
+                   be64_to_cpu(crq->query_capability.number);
                netdev_dbg(netdev, "opt_tx_comp_sub_queues = %lld\n",
                           adapter->opt_tx_comp_sub_queues);
                break;
        case OPT_RX_COMP_QUEUES:
                adapter->opt_rx_comp_queues =
-                   be32_to_cpu(crq->query_capability.number);
+                   be64_to_cpu(crq->query_capability.number);
                netdev_dbg(netdev, "opt_rx_comp_queues = %lld\n",
                           adapter->opt_rx_comp_queues);
                break;
        case OPT_RX_BUFADD_Q_PER_RX_COMP_Q:
                adapter->opt_rx_bufadd_q_per_rx_comp_q =
-                   be32_to_cpu(crq->query_capability.number);
+                   be64_to_cpu(crq->query_capability.number);
                netdev_dbg(netdev, "opt_rx_bufadd_q_per_rx_comp_q = %lld\n",
                           adapter->opt_rx_bufadd_q_per_rx_comp_q);
                break;
        case OPT_TX_ENTRIES_PER_SUBCRQ:
                adapter->opt_tx_entries_per_subcrq =
-                   be32_to_cpu(crq->query_capability.number);
+                   be64_to_cpu(crq->query_capability.number);
                netdev_dbg(netdev, "opt_tx_entries_per_subcrq = %lld\n",
                           adapter->opt_tx_entries_per_subcrq);
                break;
        case OPT_RXBA_ENTRIES_PER_SUBCRQ:
                adapter->opt_rxba_entries_per_subcrq =
-                   be32_to_cpu(crq->query_capability.number);
+                   be64_to_cpu(crq->query_capability.number);
                netdev_dbg(netdev, "opt_rxba_entries_per_subcrq = %lld\n",
                           adapter->opt_rxba_entries_per_subcrq);
                break;
index 1242925ad34cb6b0336524b1c3c1e569158aee24..1a9993cc79b572f58c172a5f4a071594cf4a93b1 100644 (file)
@@ -319,10 +319,8 @@ struct ibmvnic_capability {
        u8 first;
        u8 cmd;
        __be16 capability; /* one of ibmvnic_capabilities */
+       __be64 number;
        struct ibmvnic_rc rc;
-       __be32 number; /*FIX: should be __be64, but I'm getting the least
-                       * significant word first
-                       */
 } __packed __aligned(8);
 
 struct ibmvnic_login {
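The ibmvnic hunks above widen the CRQ capability "number" field from __be32 to __be64, so every producer and consumer has to switch to cpu_to_be64()/be64_to_cpu(); mixing widths on a __packed wire structure silently picks up the wrong half of the value. A simplified sketch (a cut-down stand-in for the real union member, not the full layout):

#include <linux/types.h>
#include <asm/byteorder.h>

struct cap_msg {			/* simplified wire layout */
	u8     first;
	u8     cmd;
	__be16 capability;
	__be64 number;			/* 64 bits on the wire */
} __packed __aligned(8);

static void fill_cap(struct cap_msg *msg, u16 cap, u64 value)
{
	msg->capability = cpu_to_be16(cap);
	msg->number = cpu_to_be64(value);	/* not cpu_to_be32() */
}

static u64 read_cap(const struct cap_msg *msg)
{
	return be64_to_cpu(msg->number);
}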
index b1de7afd41166cc9ef67ed7e43191aaff897ec4e..3ddf657bc10bcc51d5a217595ba8e98ed909e0cd 100644 (file)
@@ -270,11 +270,17 @@ jme_reset_mac_processor(struct jme_adapter *jme)
 }
 
 static inline void
-jme_clear_pm(struct jme_adapter *jme)
+jme_clear_pm_enable_wol(struct jme_adapter *jme)
 {
        jwrite32(jme, JME_PMCS, PMCS_STMASK | jme->reg_pmcs);
 }
 
+static inline void
+jme_clear_pm_disable_wol(struct jme_adapter *jme)
+{
+       jwrite32(jme, JME_PMCS, PMCS_STMASK);
+}
+
 static int
 jme_reload_eeprom(struct jme_adapter *jme)
 {
@@ -1853,7 +1859,7 @@ jme_open(struct net_device *netdev)
        struct jme_adapter *jme = netdev_priv(netdev);
        int rc;
 
-       jme_clear_pm(jme);
+       jme_clear_pm_disable_wol(jme);
        JME_NAPI_ENABLE(jme);
 
        tasklet_init(&jme->linkch_task, jme_link_change_tasklet,
@@ -1925,11 +1931,11 @@ jme_wait_link(struct jme_adapter *jme)
 static void
 jme_powersave_phy(struct jme_adapter *jme)
 {
-       if (jme->reg_pmcs) {
+       if (jme->reg_pmcs && device_may_wakeup(&jme->pdev->dev)) {
                jme_set_100m_half(jme);
                if (jme->reg_pmcs & (PMCS_LFEN | PMCS_LREN))
                        jme_wait_link(jme);
-               jme_clear_pm(jme);
+               jme_clear_pm_enable_wol(jme);
        } else {
                jme_phy_off(jme);
        }
@@ -2646,9 +2652,6 @@ jme_set_wol(struct net_device *netdev,
        if (wol->wolopts & WAKE_MAGIC)
                jme->reg_pmcs |= PMCS_MFEN;
 
-       jwrite32(jme, JME_PMCS, jme->reg_pmcs);
-       device_set_wakeup_enable(&jme->pdev->dev, !!(jme->reg_pmcs));
-
        return 0;
 }
 
@@ -3172,8 +3175,8 @@ jme_init_one(struct pci_dev *pdev,
        jme->mii_if.mdio_read = jme_mdio_read;
        jme->mii_if.mdio_write = jme_mdio_write;
 
-       jme_clear_pm(jme);
-       device_set_wakeup_enable(&pdev->dev, true);
+       jme_clear_pm_disable_wol(jme);
+       device_init_wakeup(&pdev->dev, true);
 
        jme_set_phyfifo_5level(jme);
        jme->pcirev = pdev->revision;
@@ -3304,7 +3307,7 @@ jme_resume(struct device *dev)
        if (!netif_running(netdev))
                return 0;
 
-       jme_clear_pm(jme);
+       jme_clear_pm_disable_wol(jme);
        jme_phy_on(jme);
        if (test_bit(JME_FLAG_SSET, &jme->flags))
                jme_set_settings(netdev, &jme->old_ecmd);
@@ -3312,13 +3315,14 @@ jme_resume(struct device *dev)
                jme_reset_phy_processor(jme);
        jme_phy_calibration(jme);
        jme_phy_setEA(jme);
-       jme_start_irq(jme);
        netif_device_attach(netdev);
 
        atomic_inc(&jme->link_changing);
 
        jme_reset_link(jme);
 
+       jme_start_irq(jme);
+
        return 0;
 }
 
index f191a16125893e5b742c5d99ea7bd814dba433a5..21e2c09602716351a21dc2762f1631a298bf05eb 100644 (file)
@@ -2245,7 +2245,7 @@ static int mlx4_en_set_vf_mac(struct net_device *dev, int queue, u8 *mac)
        struct mlx4_en_dev *mdev = en_priv->mdev;
        u64 mac_u64 = mlx4_mac_to_u64(mac);
 
-       if (!is_valid_ether_addr(mac))
+       if (is_multicast_ether_addr(mac))
                return -EINVAL;
 
        return mlx4_set_vf_mac(mdev->dev, en_priv->port, queue, mac_u64);
index 2cc3c626c3fea732d0862ff5f0ce520d442a6205..f8674ae62752d53bc768c16ac9b2de3053e183eb 100644 (file)
@@ -1256,6 +1256,7 @@ err_set_port:
 static int mlx4_mf_bond(struct mlx4_dev *dev)
 {
        int err = 0;
+       int nvfs;
        struct mlx4_slaves_pport slaves_port1;
        struct mlx4_slaves_pport slaves_port2;
        DECLARE_BITMAP(slaves_port_1_2, MLX4_MFUNC_MAX);
@@ -1272,11 +1273,18 @@ static int mlx4_mf_bond(struct mlx4_dev *dev)
                return -EINVAL;
        }
 
+       /* The number of virtual functions is the total number of functions
+        * minus one physical function per port.
+        */
+       nvfs = bitmap_weight(slaves_port1.slaves, dev->persist->num_vfs + 1) +
+               bitmap_weight(slaves_port2.slaves, dev->persist->num_vfs + 1) - 2;
+
        /* limit on maximum allowed VFs */
-       if ((bitmap_weight(slaves_port1.slaves, dev->persist->num_vfs + 1) +
-           bitmap_weight(slaves_port2.slaves, dev->persist->num_vfs + 1)) >
-           MAX_MF_BOND_ALLOWED_SLAVES)
+       if (nvfs > MAX_MF_BOND_ALLOWED_SLAVES) {
+               mlx4_warn(dev, "HA mode is not supported for %d VFs (max %d are allowed)\n",
+                         nvfs, MAX_MF_BOND_ALLOWED_SLAVES);
                return -EINVAL;
+       }
 
        if (dev->caps.steering_mode != MLX4_STEERING_MODE_DEVICE_MANAGED) {
                mlx4_warn(dev, "HA mode unsupported for NON DMFS steering\n");
index 787b7bb54d52acd094570283bf6793f1bfb0dab0..211c65087997dd5a92cc53a13f9fd3869927d09d 100644 (file)
@@ -193,10 +193,10 @@ int __mlx4_register_mac(struct mlx4_dev *dev, u8 port, u64 mac)
        if (need_mf_bond) {
                if (port == 1) {
                        mutex_lock(&table->mutex);
-                       mutex_lock(&dup_table->mutex);
+                       mutex_lock_nested(&dup_table->mutex, SINGLE_DEPTH_NESTING);
                } else {
                        mutex_lock(&dup_table->mutex);
-                       mutex_lock(&table->mutex);
+                       mutex_lock_nested(&table->mutex, SINGLE_DEPTH_NESTING);
                }
        } else {
                mutex_lock(&table->mutex);
@@ -389,10 +389,10 @@ void __mlx4_unregister_mac(struct mlx4_dev *dev, u8 port, u64 mac)
        if (dup) {
                if (port == 1) {
                        mutex_lock(&table->mutex);
-                       mutex_lock(&dup_table->mutex);
+                       mutex_lock_nested(&dup_table->mutex, SINGLE_DEPTH_NESTING);
                } else {
                        mutex_lock(&dup_table->mutex);
-                       mutex_lock(&table->mutex);
+                       mutex_lock_nested(&table->mutex, SINGLE_DEPTH_NESTING);
                }
        } else {
                mutex_lock(&table->mutex);
@@ -479,10 +479,10 @@ int __mlx4_replace_mac(struct mlx4_dev *dev, u8 port, int qpn, u64 new_mac)
        if (dup) {
                if (port == 1) {
                        mutex_lock(&table->mutex);
-                       mutex_lock(&dup_table->mutex);
+                       mutex_lock_nested(&dup_table->mutex, SINGLE_DEPTH_NESTING);
                } else {
                        mutex_lock(&dup_table->mutex);
-                       mutex_lock(&table->mutex);
+                       mutex_lock_nested(&table->mutex, SINGLE_DEPTH_NESTING);
                }
        } else {
                mutex_lock(&table->mutex);
@@ -588,10 +588,10 @@ int __mlx4_register_vlan(struct mlx4_dev *dev, u8 port, u16 vlan,
        if (need_mf_bond) {
                if (port == 1) {
                        mutex_lock(&table->mutex);
-                       mutex_lock(&dup_table->mutex);
+                       mutex_lock_nested(&dup_table->mutex, SINGLE_DEPTH_NESTING);
                } else {
                        mutex_lock(&dup_table->mutex);
-                       mutex_lock(&table->mutex);
+                       mutex_lock_nested(&table->mutex, SINGLE_DEPTH_NESTING);
                }
        } else {
                mutex_lock(&table->mutex);
@@ -764,10 +764,10 @@ void __mlx4_unregister_vlan(struct mlx4_dev *dev, u8 port, u16 vlan)
        if (dup) {
                if (port == 1) {
                        mutex_lock(&table->mutex);
-                       mutex_lock(&dup_table->mutex);
+                       mutex_lock_nested(&dup_table->mutex, SINGLE_DEPTH_NESTING);
                } else {
                        mutex_lock(&dup_table->mutex);
-                       mutex_lock(&table->mutex);
+                       mutex_lock_nested(&table->mutex, SINGLE_DEPTH_NESTING);
                }
        } else {
                mutex_lock(&table->mutex);
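The mlx4 hunks above take a port's table mutex and its duplicate-port counterpart back to back. Both mutexes belong to the same lock class, so the second acquisition must be annotated with mutex_lock_nested()/SINGLE_DEPTH_NESTING or lockdep reports a false self-deadlock, while the fixed port-based ordering prevents a real one. A minimal sketch of the pattern (struct and function names are placeholders):

#include <linux/mutex.h>

struct port_table {
	struct mutex mutex;
	/* ... table entries ... */
};

static void lock_table_pair(struct port_table *tbl, struct port_table *dup,
			    int port)
{
	/* fixed acquisition order by port number avoids a real deadlock */
	if (port == 1) {
		mutex_lock(&tbl->mutex);
		/* same lock class: annotate the nested acquisition for lockdep */
		mutex_lock_nested(&dup->mutex, SINGLE_DEPTH_NESTING);
	} else {
		mutex_lock(&dup->mutex);
		mutex_lock_nested(&tbl->mutex, SINGLE_DEPTH_NESTING);
	}
}

static void unlock_table_pair(struct port_table *tbl, struct port_table *dup)
{
	mutex_unlock(&tbl->mutex);
	mutex_unlock(&dup->mutex);
}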
index aac071a7e830b5fd777da7a5e4d014d802ad70d0..5b1753233c5dd8c30c1dd110a952c6d9c4360e39 100644 (file)
@@ -223,6 +223,7 @@ struct mlx5e_pport_stats {
 
 static const char rq_stats_strings[][ETH_GSTRING_LEN] = {
        "packets",
+       "bytes",
        "csum_none",
        "csum_sw",
        "lro_packets",
@@ -232,16 +233,18 @@ static const char rq_stats_strings[][ETH_GSTRING_LEN] = {
 
 struct mlx5e_rq_stats {
        u64 packets;
+       u64 bytes;
        u64 csum_none;
        u64 csum_sw;
        u64 lro_packets;
        u64 lro_bytes;
        u64 wqe_err;
-#define NUM_RQ_STATS 6
+#define NUM_RQ_STATS 7
 };
 
 static const char sq_stats_strings[][ETH_GSTRING_LEN] = {
        "packets",
+       "bytes",
        "tso_packets",
        "tso_bytes",
        "csum_offload_none",
@@ -253,6 +256,7 @@ static const char sq_stats_strings[][ETH_GSTRING_LEN] = {
 
 struct mlx5e_sq_stats {
        u64 packets;
+       u64 bytes;
        u64 tso_packets;
        u64 tso_bytes;
        u64 csum_offload_none;
@@ -260,7 +264,7 @@ struct mlx5e_sq_stats {
        u64 wake;
        u64 dropped;
        u64 nop;
-#define NUM_SQ_STATS 8
+#define NUM_SQ_STATS 9
 };
 
 struct mlx5e_stats {
@@ -304,14 +308,9 @@ enum {
        MLX5E_RQ_STATE_POST_WQES_ENABLE,
 };
 
-enum cq_flags {
-       MLX5E_CQ_HAS_CQES = 1,
-};
-
 struct mlx5e_cq {
        /* data path - accessed per cqe */
        struct mlx5_cqwq           wq;
-       unsigned long              flags;
 
        /* data path - accessed per napi poll */
        struct napi_struct        *napi;
@@ -452,6 +451,8 @@ enum mlx5e_traffic_types {
        MLX5E_NUM_TT,
 };
 
+#define IS_HASHING_TT(tt) (tt != MLX5E_TT_ANY)
+
 enum mlx5e_rqt_ix {
        MLX5E_INDIRECTION_RQT,
        MLX5E_SINGLE_RQ_RQT,
@@ -618,9 +619,12 @@ void mlx5e_enable_vlan_filter(struct mlx5e_priv *priv);
 void mlx5e_disable_vlan_filter(struct mlx5e_priv *priv);
 
 int mlx5e_redirect_rqt(struct mlx5e_priv *priv, enum mlx5e_rqt_ix rqt_ix);
+void mlx5e_build_tir_ctx_hash(void *tirc, struct mlx5e_priv *priv);
 
 int mlx5e_open_locked(struct net_device *netdev);
 int mlx5e_close_locked(struct net_device *netdev);
+void mlx5e_build_default_indir_rqt(u32 *indirection_rqt, int len,
+                                  int num_channels);
 
 static inline void mlx5e_tx_notify_hw(struct mlx5e_sq *sq,
                                      struct mlx5e_tx_wqe *wqe, int bf_sz)
index be6543570b2be18d476773f6b530dfeab6057800..2018eebe1531563e036c41c19372331f8a576b14 100644 (file)
@@ -62,10 +62,11 @@ static void mlx5e_timestamp_overflow(struct work_struct *work)
        struct delayed_work *dwork = to_delayed_work(work);
        struct mlx5e_tstamp *tstamp = container_of(dwork, struct mlx5e_tstamp,
                                                   overflow_work);
+       unsigned long flags;
 
-       write_lock(&tstamp->lock);
+       write_lock_irqsave(&tstamp->lock, flags);
        timecounter_read(&tstamp->clock);
-       write_unlock(&tstamp->lock);
+       write_unlock_irqrestore(&tstamp->lock, flags);
        schedule_delayed_work(&tstamp->overflow_work, tstamp->overflow_period);
 }
 
@@ -136,10 +137,11 @@ static int mlx5e_ptp_settime(struct ptp_clock_info *ptp,
        struct mlx5e_tstamp *tstamp = container_of(ptp, struct mlx5e_tstamp,
                                                   ptp_info);
        u64 ns = timespec64_to_ns(ts);
+       unsigned long flags;
 
-       write_lock(&tstamp->lock);
+       write_lock_irqsave(&tstamp->lock, flags);
        timecounter_init(&tstamp->clock, &tstamp->cycles, ns);
-       write_unlock(&tstamp->lock);
+       write_unlock_irqrestore(&tstamp->lock, flags);
 
        return 0;
 }
@@ -150,10 +152,11 @@ static int mlx5e_ptp_gettime(struct ptp_clock_info *ptp,
        struct mlx5e_tstamp *tstamp = container_of(ptp, struct mlx5e_tstamp,
                                                   ptp_info);
        u64 ns;
+       unsigned long flags;
 
-       write_lock(&tstamp->lock);
+       write_lock_irqsave(&tstamp->lock, flags);
        ns = timecounter_read(&tstamp->clock);
-       write_unlock(&tstamp->lock);
+       write_unlock_irqrestore(&tstamp->lock, flags);
 
        *ts = ns_to_timespec64(ns);
 
@@ -164,10 +167,11 @@ static int mlx5e_ptp_adjtime(struct ptp_clock_info *ptp, s64 delta)
 {
        struct mlx5e_tstamp *tstamp = container_of(ptp, struct mlx5e_tstamp,
                                                   ptp_info);
+       unsigned long flags;
 
-       write_lock(&tstamp->lock);
+       write_lock_irqsave(&tstamp->lock, flags);
        timecounter_adjtime(&tstamp->clock, delta);
-       write_unlock(&tstamp->lock);
+       write_unlock_irqrestore(&tstamp->lock, flags);
 
        return 0;
 }
@@ -176,6 +180,7 @@ static int mlx5e_ptp_adjfreq(struct ptp_clock_info *ptp, s32 delta)
 {
        u64 adj;
        u32 diff;
+       unsigned long flags;
        int neg_adj = 0;
        struct mlx5e_tstamp *tstamp = container_of(ptp, struct mlx5e_tstamp,
                                                  ptp_info);
@@ -189,11 +194,11 @@ static int mlx5e_ptp_adjfreq(struct ptp_clock_info *ptp, s32 delta)
        adj *= delta;
        diff = div_u64(adj, 1000000000ULL);
 
-       write_lock(&tstamp->lock);
+       write_lock_irqsave(&tstamp->lock, flags);
        timecounter_read(&tstamp->clock);
        tstamp->cycles.mult = neg_adj ? tstamp->nominal_c_mult - diff :
                                        tstamp->nominal_c_mult + diff;
-       write_unlock(&tstamp->lock);
+       write_unlock_irqrestore(&tstamp->lock, flags);
 
        return 0;
 }
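Every writer of the mlx5e timecounter lock above is converted to the irqsave variant, which only makes sense if the same rwlock can also be taken from interrupt context (the hardware timestamping path, by this reading of the patch); a plain write_lock() would then deadlock against an IRQ on the same CPU. A minimal sketch of the rule, with a stand-in for the protected state:

#include <linux/spinlock.h>

struct tstamp_state {
	rwlock_t lock;
	u64 last_ns;		/* stand-in for the timecounter state */
};

static void tstamp_update(struct tstamp_state *ts, u64 ns)
{
	unsigned long flags;

	/* process context: disable local IRQs while holding a lock that
	 * interrupt context may also take, otherwise an IRQ on this CPU
	 * would spin on the lock we already hold.
	 */
	write_lock_irqsave(&ts->lock, flags);
	ts->last_ns = ns;
	write_unlock_irqrestore(&ts->lock, flags);
}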
index 65624ac65b4c347ac5dedf55f3b209de799e7e5f..5abeb00fceb8b0876d17f4aab813054d55414cc8 100644 (file)
@@ -385,6 +385,8 @@ static int mlx5e_set_channels(struct net_device *dev,
                mlx5e_close_locked(dev);
 
        priv->params.num_channels = count;
+       mlx5e_build_default_indir_rqt(priv->params.indirection_rqt,
+                                     MLX5E_INDIR_RQT_SIZE, count);
 
        if (was_opened)
                err = mlx5e_open_locked(dev);
@@ -703,18 +705,36 @@ static int mlx5e_get_rxfh(struct net_device *netdev, u32 *indir, u8 *key,
        return 0;
 }
 
+static void mlx5e_modify_tirs_hash(struct mlx5e_priv *priv, void *in, int inlen)
+{
+       struct mlx5_core_dev *mdev = priv->mdev;
+       void *tirc = MLX5_ADDR_OF(modify_tir_in, in, ctx);
+       int i;
+
+       MLX5_SET(modify_tir_in, in, bitmask.hash, 1);
+       mlx5e_build_tir_ctx_hash(tirc, priv);
+
+       for (i = 0; i < MLX5E_NUM_TT; i++)
+               if (IS_HASHING_TT(i))
+                       mlx5_core_modify_tir(mdev, priv->tirn[i], in, inlen);
+}
+
 static int mlx5e_set_rxfh(struct net_device *dev, const u32 *indir,
                          const u8 *key, const u8 hfunc)
 {
        struct mlx5e_priv *priv = netdev_priv(dev);
-       bool close_open;
-       int err = 0;
+       int inlen = MLX5_ST_SZ_BYTES(modify_tir_in);
+       void *in;
 
        if ((hfunc != ETH_RSS_HASH_NO_CHANGE) &&
            (hfunc != ETH_RSS_HASH_XOR) &&
            (hfunc != ETH_RSS_HASH_TOP))
                return -EINVAL;
 
+       in = mlx5_vzalloc(inlen);
+       if (!in)
+               return -ENOMEM;
+
        mutex_lock(&priv->state_lock);
 
        if (indir) {
@@ -723,11 +743,6 @@ static int mlx5e_set_rxfh(struct net_device *dev, const u32 *indir,
                mlx5e_redirect_rqt(priv, MLX5E_INDIRECTION_RQT);
        }
 
-       close_open = (key || (hfunc != ETH_RSS_HASH_NO_CHANGE)) &&
-                    test_bit(MLX5E_STATE_OPENED, &priv->state);
-       if (close_open)
-               mlx5e_close_locked(dev);
-
        if (key)
                memcpy(priv->params.toeplitz_hash_key, key,
                       sizeof(priv->params.toeplitz_hash_key));
@@ -735,12 +750,13 @@ static int mlx5e_set_rxfh(struct net_device *dev, const u32 *indir,
        if (hfunc != ETH_RSS_HASH_NO_CHANGE)
                priv->params.rss_hfunc = hfunc;
 
-       if (close_open)
-               err = mlx5e_open_locked(priv->netdev);
+       mlx5e_modify_tirs_hash(priv, in, inlen);
 
        mutex_unlock(&priv->state_lock);
 
-       return err;
+       kvfree(in);
+
+       return 0;
 }
 
 static int mlx5e_get_rxnfc(struct net_device *netdev,
index d4e1c30452009718d9761a8a4949f5195d7f4c59..402994bf7e167d1ae23a8943789f712f82862f46 100644 (file)
@@ -141,6 +141,10 @@ void mlx5e_update_stats(struct mlx5e_priv *priv)
                return;
 
        /* Collect first the SW counters and then HW for consistency */
+       s->rx_packets           = 0;
+       s->rx_bytes             = 0;
+       s->tx_packets           = 0;
+       s->tx_bytes             = 0;
        s->tso_packets          = 0;
        s->tso_bytes            = 0;
        s->tx_queue_stopped     = 0;
@@ -155,6 +159,8 @@ void mlx5e_update_stats(struct mlx5e_priv *priv)
        for (i = 0; i < priv->params.num_channels; i++) {
                rq_stats = &priv->channel[i]->rq.stats;
 
+               s->rx_packets   += rq_stats->packets;
+               s->rx_bytes     += rq_stats->bytes;
                s->lro_packets  += rq_stats->lro_packets;
                s->lro_bytes    += rq_stats->lro_bytes;
                s->rx_csum_none += rq_stats->csum_none;
@@ -164,6 +170,8 @@ void mlx5e_update_stats(struct mlx5e_priv *priv)
                for (j = 0; j < priv->params.num_tc; j++) {
                        sq_stats = &priv->channel[i]->sq[j].stats;
 
+                       s->tx_packets           += sq_stats->packets;
+                       s->tx_bytes             += sq_stats->bytes;
                        s->tso_packets          += sq_stats->tso_packets;
                        s->tso_bytes            += sq_stats->tso_bytes;
                        s->tx_queue_stopped     += sq_stats->stopped;
@@ -225,23 +233,6 @@ void mlx5e_update_stats(struct mlx5e_priv *priv)
        s->tx_broadcast_bytes   =
                MLX5_GET_CTR(out, transmitted_eth_broadcast.octets);
 
-       s->rx_packets =
-               s->rx_unicast_packets +
-               s->rx_multicast_packets +
-               s->rx_broadcast_packets;
-       s->rx_bytes =
-               s->rx_unicast_bytes +
-               s->rx_multicast_bytes +
-               s->rx_broadcast_bytes;
-       s->tx_packets =
-               s->tx_unicast_packets +
-               s->tx_multicast_packets +
-               s->tx_broadcast_packets;
-       s->tx_bytes =
-               s->tx_unicast_bytes +
-               s->tx_multicast_bytes +
-               s->tx_broadcast_bytes;
-
        /* Update calculated offload counters */
        s->tx_csum_offload = s->tx_packets - tx_offload_none;
        s->rx_csum_good    = s->rx_packets - s->rx_csum_none -
@@ -1199,7 +1190,6 @@ static void mlx5e_fill_indir_rqt_rqns(struct mlx5e_priv *priv, void *rqtc)
                        ix = mlx5e_bits_invert(i, MLX5E_LOG_INDIR_RQT_SIZE);
 
                ix = priv->params.indirection_rqt[ix];
-               ix = ix % priv->params.num_channels;
                MLX5_SET(rqtc, rqtc, rq_num[i],
                         test_bit(MLX5E_STATE_OPENED, &priv->state) ?
                         priv->channel[ix]->rq.rqn :
@@ -1317,7 +1307,22 @@ static void mlx5e_build_tir_ctx_lro(void *tirc, struct mlx5e_priv *priv)
                              lro_timer_supported_periods[2]));
 }
 
-static int mlx5e_modify_tir_lro(struct mlx5e_priv *priv, int tt)
+void mlx5e_build_tir_ctx_hash(void *tirc, struct mlx5e_priv *priv)
+{
+       MLX5_SET(tirc, tirc, rx_hash_fn,
+                mlx5e_rx_hash_fn(priv->params.rss_hfunc));
+       if (priv->params.rss_hfunc == ETH_RSS_HASH_TOP) {
+               void *rss_key = MLX5_ADDR_OF(tirc, tirc,
+                                            rx_hash_toeplitz_key);
+               size_t len = MLX5_FLD_SZ_BYTES(tirc,
+                                              rx_hash_toeplitz_key);
+
+               MLX5_SET(tirc, tirc, rx_hash_symmetric, 1);
+               memcpy(rss_key, priv->params.toeplitz_hash_key, len);
+       }
+}
+
+static int mlx5e_modify_tirs_lro(struct mlx5e_priv *priv)
 {
        struct mlx5_core_dev *mdev = priv->mdev;
 
@@ -1325,6 +1330,7 @@ static int mlx5e_modify_tir_lro(struct mlx5e_priv *priv, int tt)
        void *tirc;
        int inlen;
        int err;
+       int tt;
 
        inlen = MLX5_ST_SZ_BYTES(modify_tir_in);
        in = mlx5_vzalloc(inlen);
@@ -1336,7 +1342,11 @@ static int mlx5e_modify_tir_lro(struct mlx5e_priv *priv, int tt)
 
        mlx5e_build_tir_ctx_lro(tirc, priv);
 
-       err = mlx5_core_modify_tir(mdev, priv->tirn[tt], in, inlen);
+       for (tt = 0; tt < MLX5E_NUM_TT; tt++) {
+               err = mlx5_core_modify_tir(mdev, priv->tirn[tt], in, inlen);
+               if (err)
+                       break;
+       }
 
        kvfree(in);
 
@@ -1672,17 +1682,7 @@ static void mlx5e_build_tir_ctx(struct mlx5e_priv *priv, u32 *tirc, int tt)
        default:
                MLX5_SET(tirc, tirc, indirect_table,
                         priv->rqtn[MLX5E_INDIRECTION_RQT]);
-               MLX5_SET(tirc, tirc, rx_hash_fn,
-                        mlx5e_rx_hash_fn(priv->params.rss_hfunc));
-               if (priv->params.rss_hfunc == ETH_RSS_HASH_TOP) {
-                       void *rss_key = MLX5_ADDR_OF(tirc, tirc,
-                                                    rx_hash_toeplitz_key);
-                       size_t len = MLX5_FLD_SZ_BYTES(tirc,
-                                                      rx_hash_toeplitz_key);
-
-                       MLX5_SET(tirc, tirc, rx_hash_symmetric, 1);
-                       memcpy(rss_key, priv->params.toeplitz_hash_key, len);
-               }
+               mlx5e_build_tir_ctx_hash(tirc, priv);
                break;
        }
 
@@ -1885,8 +1885,10 @@ static int mlx5e_set_features(struct net_device *netdev,
                        mlx5e_close_locked(priv->netdev);
 
                priv->params.lro_en = !!(features & NETIF_F_LRO);
-               mlx5e_modify_tir_lro(priv, MLX5E_TT_IPV4_TCP);
-               mlx5e_modify_tir_lro(priv, MLX5E_TT_IPV6_TCP);
+               err = mlx5e_modify_tirs_lro(priv);
+               if (err)
+                       mlx5_core_warn(priv->mdev, "lro modify failed, %d\n",
+                                      err);
 
                if (was_opened)
                        err = mlx5e_open_locked(priv->netdev);
@@ -2089,12 +2091,20 @@ u16 mlx5e_get_max_inline_cap(struct mlx5_core_dev *mdev)
               2 /*sizeof(mlx5e_tx_wqe.inline_hdr_start)*/;
 }
 
+void mlx5e_build_default_indir_rqt(u32 *indirection_rqt, int len,
+                                  int num_channels)
+{
+       int i;
+
+       for (i = 0; i < len; i++)
+               indirection_rqt[i] = i % num_channels;
+}
+
 static void mlx5e_build_netdev_priv(struct mlx5_core_dev *mdev,
                                    struct net_device *netdev,
                                    int num_channels)
 {
        struct mlx5e_priv *priv = netdev_priv(netdev);
-       int i;
 
        priv->params.log_sq_size           =
                MLX5E_PARAMS_DEFAULT_LOG_SQ_SIZE;
@@ -2118,8 +2128,8 @@ static void mlx5e_build_netdev_priv(struct mlx5_core_dev *mdev,
        netdev_rss_key_fill(priv->params.toeplitz_hash_key,
                            sizeof(priv->params.toeplitz_hash_key));
 
-       for (i = 0; i < MLX5E_INDIR_RQT_SIZE; i++)
-               priv->params.indirection_rqt[i] = i % num_channels;
+       mlx5e_build_default_indir_rqt(priv->params.indirection_rqt,
+                                     MLX5E_INDIR_RQT_SIZE, num_channels);
 
        priv->params.lro_wqe_sz            =
                MLX5E_PARAMS_DEFAULT_LRO_WQE_SZ;
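mlx5e_build_default_indir_rqt() above spreads the RSS indirection table round-robin over the active channels, and set_channels now rebuilds it so no entry keeps pointing past the new channel count. As a standalone illustration (plain C, outside the driver): with a table of 8 entries and 3 channels the result is 0 1 2 0 1 2 0 1.

#include <stdio.h>

static void build_default_indir(unsigned int *tbl, int len, int num_channels)
{
	/* entry i steers hash bucket i to channel (i mod num_channels) */
	for (int i = 0; i < len; i++)
		tbl[i] = i % num_channels;
}

int main(void)
{
	unsigned int tbl[8];

	build_default_indir(tbl, 8, 3);
	for (int i = 0; i < 8; i++)
		printf("%u ", tbl[i]);	/* prints: 0 1 2 0 1 2 0 1 */
	printf("\n");
	return 0;
}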
index dd959d929aadd561ab78c252fe2127e1beb24fad..59658b9d05d1fc57c9da02f39f96a245929f42e4 100644 (file)
@@ -230,10 +230,6 @@ int mlx5e_poll_rx_cq(struct mlx5e_cq *cq, int budget)
        struct mlx5e_rq *rq = container_of(cq, struct mlx5e_rq, cq);
        int work_done;
 
-       /* avoid accessing cq (dma coherent memory) if not needed */
-       if (!test_and_clear_bit(MLX5E_CQ_HAS_CQES, &cq->flags))
-               return 0;
-
        for (work_done = 0; work_done < budget; work_done++) {
                struct mlx5e_rx_wqe *wqe;
                struct mlx5_cqe64 *cqe;
@@ -267,6 +263,7 @@ int mlx5e_poll_rx_cq(struct mlx5e_cq *cq, int budget)
 
                mlx5e_build_rx_skb(cqe, rq, skb);
                rq->stats.packets++;
+               rq->stats.bytes += be32_to_cpu(cqe->byte_cnt);
                napi_gro_receive(cq->napi, skb);
 
 wq_ll_pop:
@@ -279,8 +276,5 @@ wq_ll_pop:
        /* ensure cq space is freed before enabling more cqes */
        wmb();
 
-       if (work_done == budget)
-               set_bit(MLX5E_CQ_HAS_CQES, &cq->flags);
-
        return work_done;
 }
index 2c3fba0fff546179f68a53ab23fcdf7620ba188e..bb4eeeb007dec48eb022a6917f69d7fd5e1e5071 100644 (file)
@@ -179,6 +179,7 @@ static netdev_tx_t mlx5e_sq_xmit(struct mlx5e_sq *sq, struct sk_buff *skb)
        unsigned int skb_len = skb->len;
        u8  opcode = MLX5_OPCODE_SEND;
        dma_addr_t dma_addr = 0;
+       unsigned int num_bytes;
        bool bf = false;
        u16 headlen;
        u16 ds_cnt;
@@ -204,8 +205,7 @@ static netdev_tx_t mlx5e_sq_xmit(struct mlx5e_sq *sq, struct sk_buff *skb)
                opcode       = MLX5_OPCODE_LSO;
                ihs          = skb_transport_offset(skb) + tcp_hdrlen(skb);
                payload_len  = skb->len - ihs;
-               wi->num_bytes = skb->len +
-                               (skb_shinfo(skb)->gso_segs - 1) * ihs;
+               num_bytes = skb->len + (skb_shinfo(skb)->gso_segs - 1) * ihs;
                sq->stats.tso_packets++;
                sq->stats.tso_bytes += payload_len;
        } else {
@@ -213,9 +213,11 @@ static netdev_tx_t mlx5e_sq_xmit(struct mlx5e_sq *sq, struct sk_buff *skb)
                     !skb->xmit_more &&
                     !skb_shinfo(skb)->nr_frags;
                ihs = mlx5e_get_inline_hdr_size(sq, skb, bf);
-               wi->num_bytes = max_t(unsigned int, skb->len, ETH_ZLEN);
+               num_bytes = max_t(unsigned int, skb->len, ETH_ZLEN);
        }
 
+       wi->num_bytes = num_bytes;
+
        if (skb_vlan_tag_present(skb)) {
                mlx5e_insert_vlan(eseg->inline_hdr_start, skb, ihs, &skb_data,
                                  &skb_len);
@@ -307,6 +309,7 @@ static netdev_tx_t mlx5e_sq_xmit(struct mlx5e_sq *sq, struct sk_buff *skb)
        sq->bf_budget = bf ? sq->bf_budget - 1 : 0;
 
        sq->stats.packets++;
+       sq->stats.bytes += num_bytes;
        return NETDEV_TX_OK;
 
 dma_unmap_wqe_err:
@@ -335,10 +338,6 @@ bool mlx5e_poll_tx_cq(struct mlx5e_cq *cq)
        u16 sqcc;
        int i;
 
-       /* avoid accessing cq (dma coherent memory) if not needed */
-       if (!test_and_clear_bit(MLX5E_CQ_HAS_CQES, &cq->flags))
-               return false;
-
        sq = container_of(cq, struct mlx5e_sq, cq);
 
        npkts = 0;
@@ -422,10 +421,6 @@ bool mlx5e_poll_tx_cq(struct mlx5e_cq *cq)
                                netif_tx_wake_queue(sq->txq);
                                sq->stats.wake++;
        }
-       if (i == MLX5E_TX_CQ_POLL_BUDGET) {
-               set_bit(MLX5E_CQ_HAS_CQES, &cq->flags);
-               return true;
-       }
 
-       return false;
+       return (i == MLX5E_TX_CQ_POLL_BUDGET);
 }
index 4ac8d716dbddfd9650d50c9c2af15efe7089a7d6..66d51a77609e890263cd91277c6b8e7315d660e3 100644 (file)
@@ -88,7 +88,6 @@ void mlx5e_completion_event(struct mlx5_core_cq *mcq)
 {
        struct mlx5e_cq *cq = container_of(mcq, struct mlx5e_cq, mcq);
 
-       set_bit(MLX5E_CQ_HAS_CQES, &cq->flags);
        set_bit(MLX5E_CHANNEL_NAPI_SCHED, &cq->channel->flags);
        barrier();
        napi_schedule(cq->napi);
index c071077aafbdb53ad959538b290b203f2256335e..7992c553c1f5ce2e4de9cec4caa798e0da676531 100644 (file)
@@ -215,7 +215,7 @@ mlxsw_pci_queue_elem_info_producer_get(struct mlxsw_pci_queue *q)
 {
        int index = q->producer_counter & (q->count - 1);
 
-       if ((q->producer_counter - q->consumer_counter) == q->count)
+       if ((u16) (q->producer_counter - q->consumer_counter) == q->count)
                return NULL;
        return mlxsw_pci_queue_elem_info_get(q, index);
 }
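The mlxsw fix above casts the producer/consumer difference to u16. Assuming both counters are free-running 16-bit values (consistent with the cast), the plain int subtraction goes negative once the producer wraps and the "queue full" comparison never triggers; forcing modulo-2^16 arithmetic restores it. A standalone sketch of the check:

#include <stdint.h>
#include <stdbool.h>

static bool ring_full(uint16_t producer, uint16_t consumer, uint16_t count)
{
	/* the cast keeps the difference in modulo-2^16 arithmetic */
	return (uint16_t)(producer - consumer) == count;
}

/* e.g. producer wrapped to 0x0005 while consumer is at 0xFFF5, count 16:
 * (uint16_t)(0x0005 - 0xFFF5) == 0x0010 -> full; the plain int difference
 * (-65520) would never equal count.
 */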
index 09ce451c283bb8dfb5dd9c4c69bdeb04c0e796b3..a94daa8c346ca11ca10f6eed09b0c15a28dde2c4 100644 (file)
@@ -2358,9 +2358,7 @@ static int mlxsw_sp_port_lag_leave(struct mlxsw_sp_port *mlxsw_sp_port,
        if (mlxsw_sp_port->bridged) {
                mlxsw_sp_port_active_vlans_del(mlxsw_sp_port);
                mlxsw_sp_port_bridge_leave(mlxsw_sp_port, false);
-
-               if (lag->ref_count == 1)
-                       mlxsw_sp_master_bridge_dec(mlxsw_sp, NULL);
+               mlxsw_sp_master_bridge_dec(mlxsw_sp, NULL);
        }
 
        if (lag->ref_count == 1) {
index 00cfd95ca59d53fe040ef5b002e7d30604d35e96..3e67f451f2ab918c95d7a6c3429d45b359e18978 100644 (file)
@@ -474,9 +474,9 @@ static int moxart_mac_probe(struct platform_device *pdev)
        res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
        ndev->base_addr = res->start;
        priv->base = devm_ioremap_resource(p_dev, res);
-       ret = IS_ERR(priv->base);
-       if (ret) {
+       if (IS_ERR(priv->base)) {
                dev_err(p_dev, "devm_ioremap_resource failed\n");
+               ret = PTR_ERR(priv->base);
                goto init_fail;
        }
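devm_ioremap_resource() returns an ERR_PTR(), never NULL, so the moxart fix above propagates PTR_ERR() instead of storing IS_ERR()'s 0/1 result into the return code. The standard probe idiom, as a short fragment (the example_ name is a placeholder):

#include <linux/err.h>
#include <linux/io.h>
#include <linux/platform_device.h>

static int example_map(struct platform_device *pdev, void __iomem **base)
{
	struct resource *res;

	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	*base = devm_ioremap_resource(&pdev->dev, res);
	if (IS_ERR(*base))
		return PTR_ERR(*base);	/* a real -errno, not 0/1 */

	return 0;
}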
 
index 689a4a5c8dcfc30fba2675e17293e98691648dd2..1ef03939d25f478356112fbe376ff79cfd1e8eaa 100644 (file)
@@ -811,7 +811,7 @@ qcaspi_netdev_setup(struct net_device *dev)
        dev->netdev_ops = &qcaspi_netdev_ops;
        qcaspi_set_ethtool_ops(dev);
        dev->watchdog_timeo = QCASPI_TX_TIMEOUT;
-       dev->flags = IFF_MULTICAST;
+       dev->priv_flags &= ~IFF_TX_SKB_SHARING;
        dev->tx_queue_len = 100;
 
        qca = netdev_priv(dev);
index 537974cfd427091442acc9098a0a40535bbda431..dd2cf3738b738bdf5b21e73587c5ab9aeecbe3c7 100644 (file)
@@ -4933,8 +4933,6 @@ static void rtl_init_rxcfg(struct rtl8169_private *tp)
                RTL_W32(RxConfig, RX128_INT_EN | RX_MULTI_EN | RX_DMA_BURST);
                break;
        case RTL_GIGA_MAC_VER_40:
-               RTL_W32(RxConfig, RX128_INT_EN | RX_MULTI_EN | RX_DMA_BURST | RX_EARLY_OFF);
-               break;
        case RTL_GIGA_MAC_VER_41:
        case RTL_GIGA_MAC_VER_42:
        case RTL_GIGA_MAC_VER_43:
@@ -4943,8 +4941,6 @@ static void rtl_init_rxcfg(struct rtl8169_private *tp)
        case RTL_GIGA_MAC_VER_46:
        case RTL_GIGA_MAC_VER_47:
        case RTL_GIGA_MAC_VER_48:
-               RTL_W32(RxConfig, RX128_INT_EN | RX_DMA_BURST | RX_EARLY_OFF);
-               break;
        case RTL_GIGA_MAC_VER_49:
        case RTL_GIGA_MAC_VER_50:
        case RTL_GIGA_MAC_VER_51:
@@ -7730,10 +7726,13 @@ rtl8169_get_stats64(struct net_device *dev, struct rtnl_link_stats64 *stats)
 {
        struct rtl8169_private *tp = netdev_priv(dev);
        void __iomem *ioaddr = tp->mmio_addr;
+       struct pci_dev *pdev = tp->pci_dev;
        struct rtl8169_counters *counters = tp->counters;
        unsigned int start;
 
-       if (netif_running(dev))
+       pm_runtime_get_noresume(&pdev->dev);
+
+       if (netif_running(dev) && pm_runtime_active(&pdev->dev))
                rtl8169_rx_missed(dev, ioaddr);
 
        do {
@@ -7761,7 +7760,8 @@ rtl8169_get_stats64(struct net_device *dev, struct rtnl_link_stats64 *stats)
         * Fetch additional counter values missing in stats collected by driver
         * from tally counters.
         */
-       rtl8169_update_counters(dev);
+       if (pm_runtime_active(&pdev->dev))
+               rtl8169_update_counters(dev);
 
        /*
         * Subtract values fetched during initialization.
@@ -7774,6 +7774,8 @@ rtl8169_get_stats64(struct net_device *dev, struct rtnl_link_stats64 *stats)
        stats->tx_aborted_errors = le16_to_cpu(counters->tx_aborted) -
                le16_to_cpu(tp->tc_offset.tx_aborted);
 
+       pm_runtime_put_noidle(&pdev->dev);
+
        return stats;
 }
 
@@ -7853,6 +7855,10 @@ static int rtl8169_runtime_suspend(struct device *device)
 
        rtl8169_net_suspend(dev);
 
+       /* Update counters before going into runtime suspend */
+       rtl8169_rx_missed(dev, tp->mmio_addr);
+       rtl8169_update_counters(dev);
+
        return 0;
 }
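The r8169 hunks above make .ndo_get_stats64 safe against runtime PM: a pm_runtime_get_noresume() reference pins the current state, hardware counters are read only when pm_runtime_active() confirms the device is powered, and runtime_suspend flushes the counters first so the cached values stay usable. A minimal sketch of that pattern (the actual MMIO reads are left as comments):

#include <linux/device.h>
#include <linux/pm_runtime.h>

static void read_stats_no_resume(struct device *dev)
{
	/* take a usage reference without waking a suspended device */
	pm_runtime_get_noresume(dev);

	if (pm_runtime_active(dev)) {
		/* device is powered: safe to read MMIO-backed counters here */
	} else {
		/* device runtime-suspended: use counters cached at suspend */
	}

	pm_runtime_put_noidle(dev);
}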
 
index 744d7806a9eec63e1c4fb18cc7b57fa96d73fcbc..86449c357168ebb4cd6c6fa25d1503583b8cd82d 100644 (file)
@@ -1722,7 +1722,6 @@ static int ravb_set_gti(struct net_device *ndev)
 static int ravb_probe(struct platform_device *pdev)
 {
        struct device_node *np = pdev->dev.of_node;
-       const struct of_device_id *match;
        struct ravb_private *priv;
        enum ravb_chip_id chip_id;
        struct net_device *ndev;
@@ -1754,8 +1753,7 @@ static int ravb_probe(struct platform_device *pdev)
        ndev->base_addr = res->start;
        ndev->dma = -1;
 
-       match = of_match_device(of_match_ptr(ravb_match_table), &pdev->dev);
-       chip_id = (enum ravb_chip_id)match->data;
+       chip_id = (enum ravb_chip_id)of_device_get_match_data(&pdev->dev);
 
        if (chip_id == RCAR_GEN3)
                irq = platform_get_irq_byname(pdev, "ch22");
index dfa9e59c9442884dfe6a52a5c6df0c4c70fa4737..7384499928761612775af0f8b1eaf701fcbb4d18 100644 (file)
@@ -3061,15 +3061,11 @@ static int sh_eth_drv_probe(struct platform_device *pdev)
        mdp->ether_link_active_low = pd->ether_link_active_low;
 
        /* set cpu data */
-       if (id) {
+       if (id)
                mdp->cd = (struct sh_eth_cpu_data *)id->driver_data;
-       } else  {
-               const struct of_device_id *match;
+       else
+               mdp->cd = (struct sh_eth_cpu_data *)of_device_get_match_data(&pdev->dev);
 
-               match = of_match_device(of_match_ptr(sh_eth_match_table),
-                                       &pdev->dev);
-               mdp->cd = (struct sh_eth_cpu_data *)match->data;
-       }
        mdp->reg_offset = sh_eth_get_register_offset(mdp->cd->register_type);
        if (!mdp->reg_offset) {
                dev_err(&pdev->dev, "Unknown register type (%d)\n",
index 0faf1633603531a709358388b116d596d746e3e9..efb54f356a67c03e36856ed0047f5e3d9b2bf0f1 100644 (file)
@@ -199,21 +199,12 @@ int stmmac_mdio_register(struct net_device *ndev)
        struct stmmac_priv *priv = netdev_priv(ndev);
        struct stmmac_mdio_bus_data *mdio_bus_data = priv->plat->mdio_bus_data;
        int addr, found;
-       struct device_node *mdio_node = NULL;
-       struct device_node *child_node = NULL;
+       struct device_node *mdio_node = priv->plat->mdio_node;
 
        if (!mdio_bus_data)
                return 0;
 
        if (IS_ENABLED(CONFIG_OF)) {
-               for_each_child_of_node(priv->device->of_node, child_node) {
-                       if (of_device_is_compatible(child_node,
-                                                   "snps,dwmac-mdio")) {
-                               mdio_node = child_node;
-                               break;
-                       }
-               }
-
                if (mdio_node) {
                        netdev_dbg(ndev, "FOUND MDIO subnode\n");
                } else {
index 6a52fa18cbf2e94958bdfe44c4db258ababcdd02..4514ba73d96116317ca5ff8b797b037ae7434ffd 100644 (file)
@@ -110,6 +110,7 @@ stmmac_probe_config_dt(struct platform_device *pdev, const char **mac)
        struct device_node *np = pdev->dev.of_node;
        struct plat_stmmacenet_data *plat;
        struct stmmac_dma_cfg *dma_cfg;
+       struct device_node *child_node = NULL;
 
        plat = devm_kzalloc(&pdev->dev, sizeof(*plat), GFP_KERNEL);
        if (!plat)
@@ -140,13 +141,19 @@ stmmac_probe_config_dt(struct platform_device *pdev, const char **mac)
                plat->phy_node = of_node_get(np);
        }
 
+       for_each_child_of_node(np, child_node)
+               if (of_device_is_compatible(child_node, "snps,dwmac-mdio")) {
+                       plat->mdio_node = child_node;
+                       break;
+               }
+
        /* "snps,phy-addr" is not a standard property. Mark it as deprecated
         * and warn of its use. Remove this when phy node support is added.
         */
        if (of_property_read_u32(np, "snps,phy-addr", &plat->phy_addr) == 0)
                dev_warn(&pdev->dev, "snps,phy-addr property is deprecated\n");
 
-       if ((plat->phy_node && !of_phy_is_fixed_link(np)) || plat->phy_bus_name)
+       if ((plat->phy_node && !of_phy_is_fixed_link(np)) || !plat->mdio_node)
                plat->mdio_bus_data = NULL;
        else
                plat->mdio_bus_data =
index fc8bbff2d7e37ec19d807008c1e9b70040551ea2..af11ed1e0bcc09b4ed4fa5e94df8945fab4d581e 100644 (file)
 #define DWC_MMC_RXOCTETCOUNT_GB          0x0784
 #define DWC_MMC_RXPACKETCOUNT_GB         0x0780
 
-static int debug = 3;
+static int debug = -1;
 module_param(debug, int, 0);
 MODULE_PARM_DESC(debug, "DWC_eth_qos debug level (0=none,...,16=all)");
 
@@ -650,6 +650,11 @@ struct net_local {
        u32 mmc_tx_counters_mask;
 
        struct dwceqos_flowcontrol flowcontrol;
+
+       /* Tracks the intermediate state where the PHY has been started but
+        * hardware init has not finished yet.
+        */
+       bool phy_defer;
 };
 
 static void dwceqos_read_mmc_counters(struct net_local *lp, u32 rx_mask,
@@ -901,6 +906,9 @@ static void dwceqos_adjust_link(struct net_device *ndev)
        struct phy_device *phydev = lp->phy_dev;
        int status_change = 0;
 
+       if (lp->phy_defer)
+               return;
+
        if (phydev->link) {
                if ((lp->speed != phydev->speed) ||
                    (lp->duplex != phydev->duplex)) {
@@ -1113,7 +1121,7 @@ static int dwceqos_descriptor_init(struct net_local *lp)
        /* Allocate DMA descriptors */
        size = DWCEQOS_RX_DCNT * sizeof(struct dwceqos_dma_desc);
        lp->rx_descs = dma_alloc_coherent(lp->ndev->dev.parent, size,
-                       &lp->rx_descs_addr, 0);
+                       &lp->rx_descs_addr, GFP_KERNEL);
        if (!lp->rx_descs)
                goto err_out;
        lp->rx_descs_tail_addr = lp->rx_descs_addr +
@@ -1121,7 +1129,7 @@ static int dwceqos_descriptor_init(struct net_local *lp)
 
        size = DWCEQOS_TX_DCNT * sizeof(struct dwceqos_dma_desc);
        lp->tx_descs = dma_alloc_coherent(lp->ndev->dev.parent, size,
-                       &lp->tx_descs_addr, 0);
+                       &lp->tx_descs_addr, GFP_KERNEL);
        if (!lp->tx_descs)
                goto err_out;
        lp->tx_descs_tail_addr = lp->tx_descs_addr +
@@ -1635,6 +1643,12 @@ static void dwceqos_init_hw(struct net_local *lp)
        regval = dwceqos_read(lp, REG_DWCEQOS_MAC_CFG);
        dwceqos_write(lp, REG_DWCEQOS_MAC_CFG,
                      regval | DWCEQOS_MAC_CFG_TE | DWCEQOS_MAC_CFG_RE);
+
+       lp->phy_defer = false;
+       mutex_lock(&lp->phy_dev->lock);
+       phy_read_status(lp->phy_dev);
+       dwceqos_adjust_link(lp->ndev);
+       mutex_unlock(&lp->phy_dev->lock);
 }
 
 static void dwceqos_tx_reclaim(unsigned long data)
@@ -1880,9 +1894,13 @@ static int dwceqos_open(struct net_device *ndev)
        }
        netdev_reset_queue(ndev);
 
+       /* The dwceqos reset state machine requires all phy clocks to complete,
+        * hence the unusual init order with phy_start first.
+        */
+       lp->phy_defer = true;
+       phy_start(lp->phy_dev);
        dwceqos_init_hw(lp);
        napi_enable(&lp->napi);
-       phy_start(lp->phy_dev);
 
        netif_start_queue(ndev);
        tasklet_enable(&lp->tx_bdreclaim_tasklet);
@@ -1915,18 +1933,19 @@ static int dwceqos_stop(struct net_device *ndev)
 {
        struct net_local *lp = netdev_priv(ndev);
 
-       phy_stop(lp->phy_dev);
-
        tasklet_disable(&lp->tx_bdreclaim_tasklet);
-       netif_stop_queue(ndev);
        napi_disable(&lp->napi);
 
-       dwceqos_drain_dma(lp);
+       /* Stop all tx before we drain the tx dma. */
+       netif_tx_lock_bh(lp->ndev);
+       netif_stop_queue(ndev);
+       netif_tx_unlock_bh(lp->ndev);
 
-       netif_tx_lock(lp->ndev);
+       dwceqos_drain_dma(lp);
        dwceqos_reset_hw(lp);
+       phy_stop(lp->phy_dev);
+
        dwceqos_descriptor_free(lp);
-       netif_tx_unlock(lp->ndev);
 
        return 0;
 }
@@ -2178,12 +2197,10 @@ static int dwceqos_start_xmit(struct sk_buff *skb, struct net_device *ndev)
                ((trans.initial_descriptor + trans.nr_descriptors) %
                 DWCEQOS_TX_DCNT));
 
-       dwceqos_tx_finalize(skb, lp, &trans);
-
-       netdev_sent_queue(ndev, skb->len);
-
        spin_lock_bh(&lp->tx_lock);
        lp->tx_free -= trans.nr_descriptors;
+       dwceqos_tx_finalize(skb, lp, &trans);
+       netdev_sent_queue(ndev, skb->len);
        spin_unlock_bh(&lp->tx_lock);
 
        ndev->trans_start = jiffies;
index 03833dbfca67d1ab53ca1af42398e7ea9dd04f01..dc85f7095e51038c0ebbb1182ba0d169ec0b2dd7 100644 (file)
@@ -297,6 +297,17 @@ static int kszphy_config_init(struct phy_device *phydev)
        if (priv->led_mode >= 0)
                kszphy_setup_led(phydev, type->led_mode_reg, priv->led_mode);
 
+       if (phy_interrupt_is_valid(phydev)) {
+               int ctl = phy_read(phydev, MII_BMCR);
+
+               if (ctl < 0)
+                       return ctl;
+
+               ret = phy_write(phydev, MII_BMCR, ctl & ~BMCR_ANENABLE);
+               if (ret < 0)
+                       return ret;
+       }
+
        return 0;
 }
 
@@ -635,6 +646,21 @@ static void kszphy_get_stats(struct phy_device *phydev,
                data[i] = kszphy_get_stat(phydev, i);
 }
 
+static int kszphy_resume(struct phy_device *phydev)
+{
+       int value;
+
+       mutex_lock(&phydev->lock);
+
+       value = phy_read(phydev, MII_BMCR);
+       phy_write(phydev, MII_BMCR, value & ~BMCR_PDOWN);
+
+       kszphy_config_intr(phydev);
+       mutex_unlock(&phydev->lock);
+
+       return 0;
+}
+
 static int kszphy_probe(struct phy_device *phydev)
 {
        const struct kszphy_type *type = phydev->drv->driver_data;
@@ -844,7 +870,7 @@ static struct phy_driver ksphy_driver[] = {
        .get_strings    = kszphy_get_strings,
        .get_stats      = kszphy_get_stats,
        .suspend        = genphy_suspend,
-       .resume         = genphy_resume,
+       .resume         = kszphy_resume,
 }, {
        .phy_id         = PHY_ID_KSZ8061,
        .name           = "Micrel KSZ8061",
index fc8ad001bc949e894e7659301af65565a5a21046..d61da9ece3ba021a7aa68253efe8dd82198cb575 100644 (file)
@@ -443,9 +443,14 @@ static ssize_t ppp_read(struct file *file, char __user *buf,
                         * network traffic (demand mode).
                         */
                        struct ppp *ppp = PF_TO_PPP(pf);
+
+                       ppp_recv_lock(ppp);
                        if (ppp->n_channels == 0 &&
-                           (ppp->flags & SC_LOOP_TRAFFIC) == 0)
+                           (ppp->flags & SC_LOOP_TRAFFIC) == 0) {
+                               ppp_recv_unlock(ppp);
                                break;
+                       }
+                       ppp_recv_unlock(ppp);
                }
                ret = -EAGAIN;
                if (file->f_flags & O_NONBLOCK)
@@ -532,9 +537,12 @@ static unsigned int ppp_poll(struct file *file, poll_table *wait)
        else if (pf->kind == INTERFACE) {
                /* see comment in ppp_read */
                struct ppp *ppp = PF_TO_PPP(pf);
+
+               ppp_recv_lock(ppp);
                if (ppp->n_channels == 0 &&
                    (ppp->flags & SC_LOOP_TRAFFIC) == 0)
                        mask |= POLLIN | POLLRDNORM;
+               ppp_recv_unlock(ppp);
        }
 
        return mask;
@@ -2808,6 +2816,7 @@ static struct ppp *ppp_create_interface(struct net *net, int unit,
 
 out2:
        mutex_unlock(&pn->all_ppp_mutex);
+       rtnl_unlock();
        free_netdev(dev);
 out1:
        *retp = ret;
index 224e7d82de6d2552121a2af643ea57d927e5b05b..cf77f2dffa698fa8399f5ae7b04162be8a1b34fe 100644 (file)
@@ -134,7 +134,6 @@ static void ax88172a_remove_mdio(struct usbnet *dev)
 
        netdev_info(dev->net, "deregistering mdio bus %s\n", priv->mdio->id);
        mdiobus_unregister(priv->mdio);
-       kfree(priv->mdio->irq);
        mdiobus_free(priv->mdio);
 }
 
index dc0212c3cc28cc36267d62cfebe726f51fe0c910..86ba30ba35e8fcf45f9eba287334da04b54203e2 100644 (file)
@@ -837,7 +837,11 @@ int cdc_ncm_bind_common(struct usbnet *dev, struct usb_interface *intf, u8 data_
 
        iface_no = ctx->data->cur_altsetting->desc.bInterfaceNumber;
 
-       /* reset data interface */
+       /* Reset data interface. Some devices will not reset properly
+        * unless they are configured first.  Toggle the altsetting to
+        * force a reset
+        */
+       usb_set_interface(dev->udev, iface_no, data_altsetting);
        temp = usb_set_interface(dev->udev, iface_no, 0);
        if (temp) {
                dev_dbg(&intf->dev, "set interface failed\n");
@@ -984,8 +988,6 @@ EXPORT_SYMBOL_GPL(cdc_ncm_select_altsetting);
 
 static int cdc_ncm_bind(struct usbnet *dev, struct usb_interface *intf)
 {
-       int ret;
-
        /* MBIM backwards compatible function? */
        if (cdc_ncm_select_altsetting(intf) != CDC_NCM_COMM_ALTSETTING_NCM)
                return -ENODEV;
@@ -994,16 +996,7 @@ static int cdc_ncm_bind(struct usbnet *dev, struct usb_interface *intf)
         * Additionally, generic NCM devices are assumed to accept arbitrarily
         * placed NDP.
         */
-       ret = cdc_ncm_bind_common(dev, intf, CDC_NCM_DATA_ALTSETTING_NCM, 0);
-
-       /*
-        * We should get an event when network connection is "connected" or
-        * "disconnected". Set network connection in "disconnected" state
-        * (carrier is OFF) during attach, so the IP network stack does not
-        * start IPv6 negotiation and more.
-        */
-       usbnet_link_change(dev, 0, 0);
-       return ret;
+       return cdc_ncm_bind_common(dev, intf, CDC_NCM_DATA_ALTSETTING_NCM, 0);
 }
 
 static void cdc_ncm_align_tail(struct sk_buff *skb, size_t modulus, size_t remainder, size_t max)
@@ -1586,7 +1579,8 @@ static void cdc_ncm_status(struct usbnet *dev, struct urb *urb)
 
 static const struct driver_info cdc_ncm_info = {
        .description = "CDC NCM",
-       .flags = FLAG_POINTTOPOINT | FLAG_NO_SETINT | FLAG_MULTI_PACKET,
+       .flags = FLAG_POINTTOPOINT | FLAG_NO_SETINT | FLAG_MULTI_PACKET
+                       | FLAG_LINK_INTR,
        .bind = cdc_ncm_bind,
        .unbind = cdc_ncm_unbind,
        .manage_power = usbnet_manage_power,
@@ -1599,7 +1593,7 @@ static const struct driver_info cdc_ncm_info = {
 static const struct driver_info wwan_info = {
        .description = "Mobile Broadband Network Device",
        .flags = FLAG_POINTTOPOINT | FLAG_NO_SETINT | FLAG_MULTI_PACKET
-                       | FLAG_WWAN,
+                       | FLAG_LINK_INTR | FLAG_WWAN,
        .bind = cdc_ncm_bind,
        .unbind = cdc_ncm_unbind,
        .manage_power = usbnet_manage_power,
@@ -1612,7 +1606,7 @@ static const struct driver_info wwan_info = {
 static const struct driver_info wwan_noarp_info = {
        .description = "Mobile Broadband Network Device (NO ARP)",
        .flags = FLAG_POINTTOPOINT | FLAG_NO_SETINT | FLAG_MULTI_PACKET
-                       | FLAG_WWAN | FLAG_NOARP,
+                       | FLAG_LINK_INTR | FLAG_WWAN | FLAG_NOARP,
        .bind = cdc_ncm_bind,
        .unbind = cdc_ncm_unbind,
        .manage_power = usbnet_manage_power,
index 570deef53f74301896cfbb8e2ca5c622a5b67185..a3a4ccf7cf5272530ac8addf182c479feffd47fc 100644 (file)
@@ -861,8 +861,10 @@ static const struct usb_device_id products[] = {
        {QMI_FIXED_INTF(0x1199, 0x9056, 8)},    /* Sierra Wireless Modem */
        {QMI_FIXED_INTF(0x1199, 0x9057, 8)},
        {QMI_FIXED_INTF(0x1199, 0x9061, 8)},    /* Sierra Wireless Modem */
-       {QMI_FIXED_INTF(0x1199, 0x9071, 8)},    /* Sierra Wireless MC74xx/EM74xx */
-       {QMI_FIXED_INTF(0x1199, 0x9071, 10)},   /* Sierra Wireless MC74xx/EM74xx */
+       {QMI_FIXED_INTF(0x1199, 0x9071, 8)},    /* Sierra Wireless MC74xx */
+       {QMI_FIXED_INTF(0x1199, 0x9071, 10)},   /* Sierra Wireless MC74xx */
+       {QMI_FIXED_INTF(0x1199, 0x9079, 8)},    /* Sierra Wireless EM74xx */
+       {QMI_FIXED_INTF(0x1199, 0x9079, 10)},   /* Sierra Wireless EM74xx */
        {QMI_FIXED_INTF(0x1bbb, 0x011e, 4)},    /* Telekom Speedstick LTE II (Alcatel One Touch L100V LTE) */
        {QMI_FIXED_INTF(0x1bbb, 0x0203, 2)},    /* Alcatel L800MA */
        {QMI_FIXED_INTF(0x2357, 0x0201, 4)},    /* TP-LINK HSUPA Modem MA180 */
@@ -885,6 +887,7 @@ static const struct usb_device_id products[] = {
        {QMI_FIXED_INTF(0x413c, 0x81a8, 8)},    /* Dell Wireless 5808 Gobi(TM) 4G LTE Mobile Broadband Card */
        {QMI_FIXED_INTF(0x413c, 0x81a9, 8)},    /* Dell Wireless 5808e Gobi(TM) 4G LTE Mobile Broadband Card */
        {QMI_FIXED_INTF(0x413c, 0x81b1, 8)},    /* Dell Wireless 5809e Gobi(TM) 4G LTE Mobile Broadband Card */
+       {QMI_FIXED_INTF(0x413c, 0x81b3, 8)},    /* Dell Wireless 5809e Gobi(TM) 4G LTE Mobile Broadband Card (rev3) */
        {QMI_FIXED_INTF(0x03f0, 0x4e1d, 8)},    /* HP lt4111 LTE/EV-DO/HSPA+ Gobi 4G Module */
        {QMI_FIXED_INTF(0x22de, 0x9061, 3)},    /* WeTelecom WPD-600N */
        {QMI_FIXED_INTF(0x1e0e, 0x9001, 5)},    /* SIMCom 7230E */
index 0b0ba7ef14e4e9054adeb1251c29cddba08c5187..10798128c03fc64881c5a437df3510f016461b7c 100644 (file)
@@ -1769,6 +1769,13 @@ out3:
        if (info->unbind)
                info->unbind (dev, udev);
 out1:
+       /* subdrivers must undo all they did in bind() if they
+        * fail it, but we may fail later and a deferred kevent
+        * may trigger an error resubmitting itself and, worse,
+        * schedule a timer. So we kill it all just in case.
+        */
+       cancel_work_sync(&dev->kevent);
+       del_timer_sync(&dev->delay);
        free_netdev(net);
 out:
        return status;
index 0cbf520cea778fc703c9657ab2d085eee522634b..fc895d0e85d9cafced5d7e9d1166e8bc0de833bd 100644 (file)
@@ -814,7 +814,7 @@ vmxnet3_tq_init_all(struct vmxnet3_adapter *adapter)
 
 
 /*
- *    parse and copy relevant protocol headers:
+ *    parse relevant protocol headers:
  *      For a tso pkt, relevant headers are L2/3/4 including options
  *      For a pkt requesting csum offloading, they are L2/3 and may include L4
  *      if it's a TCP/UDP pkt
@@ -827,15 +827,14 @@ vmxnet3_tq_init_all(struct vmxnet3_adapter *adapter)
  * Other effects:
  *    1. related *ctx fields are updated.
  *    2. ctx->copy_size is # of bytes copied
- *    3. the portion copied is guaranteed to be in the linear part
+ *    3. the portion to be copied is guaranteed to be in the linear part
  *
  */
 static int
-vmxnet3_parse_and_copy_hdr(struct sk_buff *skb, struct vmxnet3_tx_queue *tq,
-                          struct vmxnet3_tx_ctx *ctx,
-                          struct vmxnet3_adapter *adapter)
+vmxnet3_parse_hdr(struct sk_buff *skb, struct vmxnet3_tx_queue *tq,
+                 struct vmxnet3_tx_ctx *ctx,
+                 struct vmxnet3_adapter *adapter)
 {
-       struct Vmxnet3_TxDataDesc *tdd;
        u8 protocol = 0;
 
        if (ctx->mss) { /* TSO */
@@ -892,16 +891,34 @@ vmxnet3_parse_and_copy_hdr(struct sk_buff *skb, struct vmxnet3_tx_queue *tq,
                return 0;
        }
 
+       return 1;
+err:
+       return -1;
+}
+
+/*
+ *    copy relevant protocol headers to the transmit ring:
+ *      For a tso pkt, relevant headers are L2/3/4 including options
+ *      For a pkt requesting csum offloading, they are L2/3 and may include L4
+ *      if it's a TCP/UDP pkt
+ *
+ *
+ *    Note that this requires that vmxnet3_parse_hdr be called first to set
+ *      the appropriate bits in ctx
+ */
+static void
+vmxnet3_copy_hdr(struct sk_buff *skb, struct vmxnet3_tx_queue *tq,
+                struct vmxnet3_tx_ctx *ctx,
+                struct vmxnet3_adapter *adapter)
+{
+       struct Vmxnet3_TxDataDesc *tdd;
+
        tdd = tq->data_ring.base + tq->tx_ring.next2fill;
 
        memcpy(tdd->data, skb->data, ctx->copy_size);
        netdev_dbg(adapter->netdev,
                "copy %u bytes to dataRing[%u]\n",
                ctx->copy_size, tq->tx_ring.next2fill);
-       return 1;
-
-err:
-       return -1;
 }
 
 
@@ -998,22 +1015,7 @@ vmxnet3_tq_xmit(struct sk_buff *skb, struct vmxnet3_tx_queue *tq,
                }
        }
 
-       spin_lock_irqsave(&tq->tx_lock, flags);
-
-       if (count > vmxnet3_cmd_ring_desc_avail(&tq->tx_ring)) {
-               tq->stats.tx_ring_full++;
-               netdev_dbg(adapter->netdev,
-                       "tx queue stopped on %s, next2comp %u"
-                       " next2fill %u\n", adapter->netdev->name,
-                       tq->tx_ring.next2comp, tq->tx_ring.next2fill);
-
-               vmxnet3_tq_stop(tq, adapter);
-               spin_unlock_irqrestore(&tq->tx_lock, flags);
-               return NETDEV_TX_BUSY;
-       }
-
-
-       ret = vmxnet3_parse_and_copy_hdr(skb, tq, &ctx, adapter);
+       ret = vmxnet3_parse_hdr(skb, tq, &ctx, adapter);
        if (ret >= 0) {
                BUG_ON(ret <= 0 && ctx.copy_size != 0);
                /* hdrs parsed, check against other limits */
@@ -1033,9 +1035,26 @@ vmxnet3_tq_xmit(struct sk_buff *skb, struct vmxnet3_tx_queue *tq,
                }
        } else {
                tq->stats.drop_hdr_inspect_err++;
-               goto unlock_drop_pkt;
+               goto drop_pkt;
        }
 
+       spin_lock_irqsave(&tq->tx_lock, flags);
+
+       if (count > vmxnet3_cmd_ring_desc_avail(&tq->tx_ring)) {
+               tq->stats.tx_ring_full++;
+               netdev_dbg(adapter->netdev,
+                       "tx queue stopped on %s, next2comp %u"
+                       " next2fill %u\n", adapter->netdev->name,
+                       tq->tx_ring.next2comp, tq->tx_ring.next2fill);
+
+               vmxnet3_tq_stop(tq, adapter);
+               spin_unlock_irqrestore(&tq->tx_lock, flags);
+               return NETDEV_TX_BUSY;
+       }
+
+
+       vmxnet3_copy_hdr(skb, tq, &ctx, adapter);
+
        /* fill tx descs related to addr & len */
        if (vmxnet3_map_pkt(skb, &ctx, tq, adapter->pdev, adapter))
                goto unlock_drop_pkt;
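
The vmxnet3 comments above note that vmxnet3_parse_hdr() must run before vmxnet3_copy_hdr(), with the descriptor-availability check now sitting between the two under tq->tx_lock. A condensed, hedged sketch of that ordering, with error paths and statistics trimmed for readability; this illustrates the reworked flow, not the driver's complete transmit path:

	/* Simplified ordering inside the transmit path (sketch only). */
	ret = vmxnet3_parse_hdr(skb, tq, &ctx, adapter);
	if (ret < 0)
		goto drop_pkt;			/* header inspection failed */

	spin_lock_irqsave(&tq->tx_lock, flags);
	if (count > vmxnet3_cmd_ring_desc_avail(&tq->tx_ring)) {
		vmxnet3_tq_stop(tq, adapter);	/* ring full: stop the queue */
		spin_unlock_irqrestore(&tq->tx_lock, flags);
		return NETDEV_TX_BUSY;
	}

	/* Safe to touch the data ring now: ctx was filled by parse_hdr(). */
	vmxnet3_copy_hdr(skb, tq, &ctx, adapter);
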
index 66addb7a7911beb33f17c916caa7bb48ae84507e..bdcf617a9d52b86eb41c13dad6df1c3c42d3d319 100644 (file)
@@ -104,20 +104,23 @@ static struct dst_ops vrf_dst_ops = {
 #if IS_ENABLED(CONFIG_IPV6)
 static bool check_ipv6_frame(const struct sk_buff *skb)
 {
-       const struct ipv6hdr *ipv6h = (struct ipv6hdr *)skb->data;
-       size_t hlen = sizeof(*ipv6h);
+       const struct ipv6hdr *ipv6h;
+       struct ipv6hdr _ipv6h;
        bool rc = true;
 
-       if (skb->len < hlen)
+       ipv6h = skb_header_pointer(skb, 0, sizeof(_ipv6h), &_ipv6h);
+       if (!ipv6h)
                goto out;
 
        if (ipv6h->nexthdr == NEXTHDR_ICMP) {
                const struct icmp6hdr *icmph;
+               struct icmp6hdr _icmph;
 
-               if (skb->len < hlen + sizeof(*icmph))
+               icmph = skb_header_pointer(skb, sizeof(_ipv6h),
+                                          sizeof(_icmph), &_icmph);
+               if (!icmph)
                        goto out;
 
-               icmph = (struct icmp6hdr *)(skb->data + sizeof(*ipv6h));
                switch (icmph->icmp6_type) {
                case NDISC_ROUTER_SOLICITATION:
                case NDISC_ROUTER_ADVERTISEMENT:
index e6944b29588e1bd329c5b331988373d5297c0bd6..1c32bd10479730a73f2832fa7f15ef3d845941db 100644 (file)
@@ -931,8 +931,10 @@ static int vxlan_fdb_dump(struct sk_buff *skb, struct netlink_callback *cb,
                                                     cb->nlh->nlmsg_seq,
                                                     RTM_NEWNEIGH,
                                                     NLM_F_MULTI, rd);
-                               if (err < 0)
+                               if (err < 0) {
+                                       cb->args[1] = err;
                                        goto out;
+                               }
 skip:
                                ++idx;
                        }
@@ -1306,8 +1308,10 @@ static int vxlan_udp_encap_recv(struct sock *sk, struct sk_buff *skb)
                gbp = (struct vxlanhdr_gbp *)vxh;
                md->gbp = ntohs(gbp->policy_id);
 
-               if (tun_dst)
+               if (tun_dst) {
                        tun_dst->u.tun_info.key.tun_flags |= TUNNEL_VXLAN_OPT;
+                       tun_dst->u.tun_info.options_len = sizeof(*md);
+               }
 
                if (gbp->dont_learn)
                        md->gbp |= VXLAN_GBP_DONT_LEARN;
index 4ed5180c547bb6ce8af288a6653220b5cf2c61b4..0ccc697fef76cf25c94443c4cde9d0fb1185df05 100644 (file)
@@ -107,7 +107,7 @@ static int iwl_send_tx_ant_cfg(struct iwl_mvm *mvm, u8 valid_tx_ant)
                                    sizeof(tx_ant_cmd), &tx_ant_cmd);
 }
 
-static void iwl_free_fw_paging(struct iwl_mvm *mvm)
+void iwl_free_fw_paging(struct iwl_mvm *mvm)
 {
        int i;
 
@@ -127,6 +127,8 @@ static void iwl_free_fw_paging(struct iwl_mvm *mvm)
                             get_order(mvm->fw_paging_db[i].fw_paging_size));
        }
        kfree(mvm->trans->paging_download_buf);
+       mvm->trans->paging_download_buf = NULL;
+
        memset(mvm->fw_paging_db, 0, sizeof(mvm->fw_paging_db));
 }
 
index 5f3ac8cccf49d2c5a07644f894b1ba920dca8bfc..ff7c6df9f9418ebac294709f63c21911459cfac1 100644 (file)
@@ -1225,6 +1225,9 @@ void iwl_mvm_rx_umac_scan_complete_notif(struct iwl_mvm *mvm,
 void iwl_mvm_rx_umac_scan_iter_complete_notif(struct iwl_mvm *mvm,
                                              struct iwl_rx_cmd_buffer *rxb);
 
+/* Paging */
+void iwl_free_fw_paging(struct iwl_mvm *mvm);
+
 /* MVM debugfs */
 #ifdef CONFIG_IWLWIFI_DEBUGFS
 int iwl_mvm_dbgfs_register(struct iwl_mvm *mvm, struct dentry *dbgfs_dir);
index 89ea70deeb84410edd0e3ad07ae77abc08476107..e80be9a595207bcf962551cc2bde42d14b898a03 100644 (file)
@@ -684,6 +684,8 @@ static void iwl_op_mode_mvm_stop(struct iwl_op_mode *op_mode)
        for (i = 0; i < NVM_MAX_NUM_SECTIONS; i++)
                kfree(mvm->nvm_sections[i].data);
 
+       iwl_free_fw_paging(mvm);
+
        iwl_mvm_tof_clean(mvm);
 
        ieee80211_free_hw(mvm->hw);
index 0914ec2fd57467023b0f5336b2e630856c06cf81..a040edc550570a11373ed30b4916b7930c7fa832 100644 (file)
@@ -423,6 +423,15 @@ int iwl_mvm_tx_skb_non_sta(struct iwl_mvm *mvm, struct sk_buff *skb)
                return -1;
        }
 
+       /*
+        * Increase the pending frames counter, so that later, when a reply comes
+        * in and the counter is decreased, we don't start getting negative
+        * values.
+        * Note that we don't need to make sure it isn't aggregated, since we're
+        * TXing non-sta frames.
+        */
+       atomic_inc(&mvm->pending_frames[sta_id]);
+
        return 0;
 }
 
index 39c4be41ef83d6cf23302fb9af6270f8857f85de..365dc7e83ab43cd3473d0da57d0ea768784869c8 100644 (file)
@@ -305,6 +305,7 @@ EXPORT_SYMBOL(of_phy_find_device);
  * @dev: pointer to net_device claiming the phy
  * @phy_np: Pointer to device tree node for the PHY
  * @hndlr: Link state callback for the network device
+ * @flags: flags to pass to the PHY
  * @iface: PHY data interface type
  *
  * If successful, returns a pointer to the phy_device with the embedded
index 602eb422351060c611967538359b8409885397a1..f89db3af0607263648c9a64432bcb1efdaeb8326 100644 (file)
@@ -4772,8 +4772,10 @@ int pci_get_new_domain_nr(void)
 void pci_bus_assign_domain_nr(struct pci_bus *bus, struct device *parent)
 {
        static int use_dt_domains = -1;
-       int domain = of_get_pci_domain_nr(parent->of_node);
+       int domain = -1;
 
+       if (parent)
+               domain = of_get_pci_domain_nr(parent->of_node);
        /*
         * Check DT domain and use_dt_domains values.
         *
index cb61f300f8b5d111ce7871a39be43a1b7fe3ca80..277b5c8c825ca4a63864a01ba67cbd8765b5a041 100644 (file)
@@ -67,7 +67,7 @@ static const u8 DASD_DIAG_CMS1[] = { 0xc3, 0xd4, 0xe2, 0xf1 };/* EBCDIC CMS1 */
  * and function code cmd.
  * In case of an exception return 3. Otherwise return result of bitwise OR of
  * resulting condition code and DIAG return code. */
-static inline int dia250(void *iob, int cmd)
+static inline int __dia250(void *iob, int cmd)
 {
        register unsigned long reg2 asm ("2") = (unsigned long) iob;
        typedef union {
@@ -77,7 +77,6 @@ static inline int dia250(void *iob, int cmd)
        int rc;
 
        rc = 3;
-       diag_stat_inc(DIAG_STAT_X250);
        asm volatile(
                "       diag    2,%2,0x250\n"
                "0:     ipm     %0\n"
@@ -91,6 +90,12 @@ static inline int dia250(void *iob, int cmd)
        return rc;
 }
 
+static inline int dia250(void *iob, int cmd)
+{
+       diag_stat_inc(DIAG_STAT_X250);
+       return __dia250(iob, cmd);
+}
+
 /* Initialize block I/O to DIAG device using the specified blocksize and
  * block offset. On success, return zero and set end_block to contain the
  * number of blocks on the device minus the specified offset. Return non-zero
index 6a4ff27f4357eb229815c18b535327487f2df580..c688efa95e29b0561ac14be901cc4e4faa5aaf33 100644 (file)
@@ -204,8 +204,8 @@ static bool spi_imx_can_dma(struct spi_master *master, struct spi_device *spi,
 {
        struct spi_imx_data *spi_imx = spi_master_get_devdata(master);
 
-       if (spi_imx->dma_is_inited &&
-           transfer->len > spi_imx->wml * sizeof(u32))
+       if (spi_imx->dma_is_inited && transfer->len >= spi_imx->wml &&
+           (transfer->len % spi_imx->wml) == 0)
                return true;
        return false;
 }
@@ -919,8 +919,6 @@ static int spi_imx_dma_transfer(struct spi_imx_data *spi_imx,
        struct dma_async_tx_descriptor *desc_tx = NULL, *desc_rx = NULL;
        int ret;
        unsigned long timeout;
-       u32 dma;
-       int left;
        struct spi_master *master = spi_imx->bitbang.master;
        struct sg_table *tx = &transfer->tx_sg, *rx = &transfer->rx_sg;
 
@@ -954,13 +952,6 @@ static int spi_imx_dma_transfer(struct spi_imx_data *spi_imx,
        /* Trigger the cspi module. */
        spi_imx->dma_finished = 0;
 
-       dma = readl(spi_imx->base + MX51_ECSPI_DMA);
-       dma = dma & (~MX51_ECSPI_DMA_RXT_WML_MASK);
-       /* Change RX_DMA_LENGTH trigger dma fetch tail data */
-       left = transfer->len % spi_imx->wml;
-       if (left)
-               writel(dma | (left << MX51_ECSPI_DMA_RXT_WML_OFFSET),
-                               spi_imx->base + MX51_ECSPI_DMA);
        /*
         * Set these order to avoid potential RX overflow. The overflow may
         * happen if we enable SPI HW before starting RX DMA due to rescheduling
@@ -992,10 +983,6 @@ static int spi_imx_dma_transfer(struct spi_imx_data *spi_imx,
                        spi_imx->devtype_data->reset(spi_imx);
                        dmaengine_terminate_all(master->dma_rx);
                }
-               dma &= ~MX51_ECSPI_DMA_RXT_WML_MASK;
-               writel(dma |
-                      spi_imx->wml << MX51_ECSPI_DMA_RXT_WML_OFFSET,
-                      spi_imx->base + MX51_ECSPI_DMA);
        }
 
        spi_imx->dma_finished = 1;
index 79a8bc4f6cec9e32ec9c78627a2e68fc85ab5e12..7cb1b2d710c10f7aab6c245a31d34e5439dfcbf0 100644 (file)
@@ -749,6 +749,7 @@ static int rockchip_spi_probe(struct platform_device *pdev)
        return 0;
 
 err_register_master:
+       pm_runtime_disable(&pdev->dev);
        if (rs->dma_tx.ch)
                dma_release_channel(rs->dma_tx.ch);
        if (rs->dma_rx.ch)
@@ -778,6 +779,8 @@ static int rockchip_spi_remove(struct platform_device *pdev)
        if (rs->dma_rx.ch)
                dma_release_channel(rs->dma_rx.ch);
 
+       spi_master_put(master);
+
        return 0;
 }
 
index 0c675861623f4abeb2054b65c87fe185d8188bf3..d8e4219c2324900c583245407a7c78f20e20325e 100644 (file)
@@ -83,6 +83,7 @@ config SSB_SDIOHOST
 config SSB_HOST_SOC
        bool "Support for SSB bus on SoC"
        depends on SSB && BCM47XX_NVRAM
+       select SSB_SPROM
        help
          Host interface for a SSB directly mapped into memory. This is
          for some Broadcom SoCs from the BCM47xx and BCM53xx lines.
index 82a663ba98009fb5f286a88651c9b3a54cf68bbb..4f229e711e1c1cfc0134abb71b617f9511a6e606 100644 (file)
@@ -177,7 +177,6 @@ void core_tmr_abort_task(
 
                if (!__target_check_io_state(se_cmd, se_sess, 0)) {
                        spin_unlock_irqrestore(&se_sess->sess_cmd_lock, flags);
-                       target_put_sess_cmd(se_cmd);
                        goto out;
                }
                list_del_init(&se_cmd->se_cmd_list);
index 711172450da642ccdcf4ac7dcceb7546faf66fad..bbb2ad78377020ac85158fb61df067aceadaafe5 100644 (file)
--- a/fs/dax.c
+++ b/fs/dax.c
@@ -1056,6 +1056,7 @@ EXPORT_SYMBOL_GPL(dax_pmd_fault);
 int dax_pfn_mkwrite(struct vm_area_struct *vma, struct vm_fault *vmf)
 {
        struct file *file = vma->vm_file;
+       int error;
 
        /*
         * We pass NO_SECTOR to dax_radix_entry() because we expect that a
@@ -1065,7 +1066,13 @@ int dax_pfn_mkwrite(struct vm_area_struct *vma, struct vm_fault *vmf)
         * saves us from having to make a call to get_block() here to look
         * up the sector.
         */
-       dax_radix_entry(file->f_mapping, vmf->pgoff, NO_SECTOR, false, true);
+       error = dax_radix_entry(file->f_mapping, vmf->pgoff, NO_SECTOR, false,
+                       true);
+
+       if (error == -ENOMEM)
+               return VM_FAULT_OOM;
+       if (error)
+               return VM_FAULT_SIGBUS;
        return VM_FAULT_NOPAGE;
 }
 EXPORT_SYMBOL_GPL(dax_pfn_mkwrite);
index e032a0423e351cabfd6fe6156cdfc9c53ebff216..4098acc701c3e66d5e8a229ed49828d69997655f 100644 (file)
@@ -390,6 +390,7 @@ data_copy:
                *err = ext4_get_block(orig_inode, orig_blk_offset + i, bh, 0);
                if (*err < 0)
                        break;
+               bh = bh->b_this_page;
        }
        if (!*err)
                *err = block_commit_write(pagep[0], from, from + replaced_size);
index d211b8e18566719c873838268217b143966e5965..30c4c9ebb693faaecf249df4c913c01bbaa58f20 100644 (file)
@@ -843,9 +843,14 @@ static int jffs2_rename (struct inode *old_dir_i, struct dentry *old_dentry,
 
                pr_notice("%s(): Link succeeded, unlink failed (err %d). You now have a hard link\n",
                          __func__, ret);
-               /* Might as well let the VFS know */
-               d_instantiate(new_dentry, d_inode(old_dentry));
-               ihold(d_inode(old_dentry));
+               /*
+                * We can't keep the target in dcache after that.
+                * For one thing, we can't afford dentry aliases for directories.
+                * For another, if there was a victim, we _can't_ set new inode
+                * for that sucker and we have to trigger mount eviction - the
+                * caller won't do it on its own since we are returning an error.
+                */
+               d_invalidate(new_dentry);
                new_dir_i->i_mtime = new_dir_i->i_ctime = ITIME(now);
                return ret;
        }
index 26c2de2de13fd0ce7bb55287c58e268cf4bf37e6..b7f8eaeea5d83d8a1cb66e2d697de5341e7361cd 100644 (file)
@@ -633,7 +633,7 @@ ncp_fill_cache(struct file *file, struct dir_context *ctx,
                                d_rehash(newdent);
                } else {
                        spin_lock(&dentry->d_lock);
-                       NCP_FINFO(inode)->flags &= ~NCPI_DIR_CACHE;
+                       NCP_FINFO(dir)->flags &= ~NCPI_DIR_CACHE;
                        spin_unlock(&dentry->d_lock);
                }
        } else {
index 9581d190f6e12346e70226c93458df0a1791abe0..77ebc2bc1cca112056501fe4dc160d48cc7069cd 100644 (file)
@@ -147,6 +147,10 @@ static int ocfs2_page_mkwrite(struct vm_area_struct *vma, struct vm_fault *vmf)
        ret = ocfs2_inode_lock(inode, &di_bh, 1);
        if (ret < 0) {
                mlog_errno(ret);
+               if (ret == -ENOMEM)
+                       ret = VM_FAULT_OOM;
+               else
+                       ret = VM_FAULT_SIGBUS;
                goto out;
        }
 
index ed95272d57a61af3f26a58c71cf789d3c94e28ce..52f6de5d40a9211baa29a39fc2a178598a86a92f 100644 (file)
@@ -618,7 +618,8 @@ static int ovl_remove_upper(struct dentry *dentry, bool is_dir)
         * sole user of this dentry.  Too tricky...  Just unhash for
         * now.
         */
-       d_drop(dentry);
+       if (!err)
+               d_drop(dentry);
        inode_unlock(dir);
 
        return err;
@@ -903,6 +904,13 @@ static int ovl_rename2(struct inode *olddir, struct dentry *old,
        if (!overwrite && new_is_dir && !old_opaque && new_opaque)
                ovl_remove_opaque(newdentry);
 
+       /*
+        * The old dentry now lives in a different location. Dentries in
+        * lowerstack are stale. We cannot drop them here because
+        * access to them is lockless. This can only be a pure upper
+        * or opaque directory - numlower is zero. Or an upper non-dir
+        * entry - its pureness is tracked by the opaque flag.
+        */
        if (old_opaque != new_opaque) {
                ovl_dentry_set_opaque(old, new_opaque);
                if (!overwrite)
index 49e204560655a8f1737a2a03a38b7a88604a5a4c..a4ff5d0d7db91605880b4eb350ea37aa397c9c9d 100644 (file)
@@ -65,6 +65,8 @@ int ovl_setattr(struct dentry *dentry, struct iattr *attr)
 
                inode_lock(upperdentry->d_inode);
                err = notify_change(upperdentry, attr, NULL);
+               if (!err)
+                       ovl_copyattr(upperdentry->d_inode, dentry->d_inode);
                inode_unlock(upperdentry->d_inode);
        }
        ovl_drop_write(dentry);
index 8d826bd56b26b10d641d7ce1f7a47b747851fac9..619ad4b016d209adcb2b6649d6f654049c860b07 100644 (file)
@@ -76,12 +76,14 @@ enum ovl_path_type ovl_path_type(struct dentry *dentry)
        if (oe->__upperdentry) {
                type = __OVL_PATH_UPPER;
 
-               if (oe->numlower) {
-                       if (S_ISDIR(dentry->d_inode->i_mode))
-                               type |= __OVL_PATH_MERGE;
-               } else if (!oe->opaque) {
+               /*
+                * A non-dir dentry can hold a lower dentry from its previous
+                * location. Its purity depends only on the opaque flag.
+                */
+               if (oe->numlower && S_ISDIR(dentry->d_inode->i_mode))
+                       type |= __OVL_PATH_MERGE;
+               else if (!oe->opaque)
                        type |= __OVL_PATH_PURE;
-               }
        } else {
                if (oe->numlower > 1)
                        type |= __OVL_PATH_MERGE;
@@ -341,6 +343,7 @@ static const struct dentry_operations ovl_dentry_operations = {
 
 static const struct dentry_operations ovl_reval_dentry_operations = {
        .d_release = ovl_dentry_release,
+       .d_select_inode = ovl_d_select_inode,
        .d_revalidate = ovl_dentry_revalidate,
        .d_weak_revalidate = ovl_dentry_weak_revalidate,
 };
index 594f7e63b432427fd5b6448afa0a75eb0b71d558..be5568839442d1ab50bf5cae293b7b2b133b525e 100644 (file)
@@ -1109,27 +1109,10 @@ xlog_verify_head(
        bool                    tmp_wrapped;
 
        /*
-        * Search backwards through the log looking for the log record header
-        * block. This wraps all the way back around to the head so something is
-        * seriously wrong if we can't find it.
-        */
-       found = xlog_rseek_logrec_hdr(log, *head_blk, *head_blk, 1, bp, rhead_blk,
-                                     rhead, wrapped);
-       if (found < 0)
-               return found;
-       if (!found) {
-               xfs_warn(log->l_mp, "%s: couldn't find sync record", __func__);
-               return -EIO;
-       }
-
-       *tail_blk = BLOCK_LSN(be64_to_cpu((*rhead)->h_tail_lsn));
-
-       /*
-        * Now that we have a tail block, check the head of the log for torn
-        * writes. Search again until we hit the tail or the maximum number of
-        * log record I/Os that could have been in flight at one time. Use a
-        * temporary buffer so we don't trash the rhead/bp pointer from the
-        * call above.
+        * Check the head of the log for torn writes. Search backwards from the
+        * head until we hit the tail or the maximum number of log record I/Os
+        * that could have been in flight at one time. Use a temporary buffer so
+        * we don't trash the rhead/bp pointers from the caller.
         */
        tmp_bp = xlog_get_bp(log, 1);
        if (!tmp_bp)
@@ -1215,6 +1198,115 @@ xlog_verify_head(
        return error;
 }
 
+/*
+ * Check whether the head of the log points to an unmount record. In other
+ * words, determine whether the log is clean. If so, update the in-core state
+ * appropriately.
+ */
+static int
+xlog_check_unmount_rec(
+       struct xlog             *log,
+       xfs_daddr_t             *head_blk,
+       xfs_daddr_t             *tail_blk,
+       struct xlog_rec_header  *rhead,
+       xfs_daddr_t             rhead_blk,
+       struct xfs_buf          *bp,
+       bool                    *clean)
+{
+       struct xlog_op_header   *op_head;
+       xfs_daddr_t             umount_data_blk;
+       xfs_daddr_t             after_umount_blk;
+       int                     hblks;
+       int                     error;
+       char                    *offset;
+
+       *clean = false;
+
+       /*
+        * Look for unmount record. If we find it, then we know there was a
+        * clean unmount. Since 'i' could be the last block in the physical
+        * log, we convert to a log block before comparing to the head_blk.
+        *
+        * Save the current tail lsn to use to pass to xlog_clear_stale_blocks()
+        * below. We won't want to clear the unmount record if there is one, so
+        * we pass the lsn of the unmount record rather than the block after it.
+        */
+       if (xfs_sb_version_haslogv2(&log->l_mp->m_sb)) {
+               int     h_size = be32_to_cpu(rhead->h_size);
+               int     h_version = be32_to_cpu(rhead->h_version);
+
+               if ((h_version & XLOG_VERSION_2) &&
+                   (h_size > XLOG_HEADER_CYCLE_SIZE)) {
+                       hblks = h_size / XLOG_HEADER_CYCLE_SIZE;
+                       if (h_size % XLOG_HEADER_CYCLE_SIZE)
+                               hblks++;
+               } else {
+                       hblks = 1;
+               }
+       } else {
+               hblks = 1;
+       }
+       after_umount_blk = rhead_blk + hblks + BTOBB(be32_to_cpu(rhead->h_len));
+       after_umount_blk = do_mod(after_umount_blk, log->l_logBBsize);
+       if (*head_blk == after_umount_blk &&
+           be32_to_cpu(rhead->h_num_logops) == 1) {
+               umount_data_blk = rhead_blk + hblks;
+               umount_data_blk = do_mod(umount_data_blk, log->l_logBBsize);
+               error = xlog_bread(log, umount_data_blk, 1, bp, &offset);
+               if (error)
+                       return error;
+
+               op_head = (struct xlog_op_header *)offset;
+               if (op_head->oh_flags & XLOG_UNMOUNT_TRANS) {
+                       /*
+                        * Set tail and last sync so that newly written log
+                        * records will point recovery to after the current
+                        * unmount record.
+                        */
+                       xlog_assign_atomic_lsn(&log->l_tail_lsn,
+                                       log->l_curr_cycle, after_umount_blk);
+                       xlog_assign_atomic_lsn(&log->l_last_sync_lsn,
+                                       log->l_curr_cycle, after_umount_blk);
+                       *tail_blk = after_umount_blk;
+
+                       *clean = true;
+               }
+       }
+
+       return 0;
+}
+
+static void
+xlog_set_state(
+       struct xlog             *log,
+       xfs_daddr_t             head_blk,
+       struct xlog_rec_header  *rhead,
+       xfs_daddr_t             rhead_blk,
+       bool                    bump_cycle)
+{
+       /*
+        * Reset log values according to the state of the log when we
+        * crashed.  In the case where head_blk == 0, we bump curr_cycle
+        * one because the next write starts a new cycle rather than
+        * continuing the cycle of the last good log record.  At this
+        * point we have guaranteed that all partial log records have been
+        * accounted for.  Therefore, we know that the last good log record
+        * written was complete and ended exactly on the end boundary
+        * of the physical log.
+        */
+       log->l_prev_block = rhead_blk;
+       log->l_curr_block = (int)head_blk;
+       log->l_curr_cycle = be32_to_cpu(rhead->h_cycle);
+       if (bump_cycle)
+               log->l_curr_cycle++;
+       atomic64_set(&log->l_tail_lsn, be64_to_cpu(rhead->h_tail_lsn));
+       atomic64_set(&log->l_last_sync_lsn, be64_to_cpu(rhead->h_lsn));
+       xlog_assign_grant_head(&log->l_reserve_head.grant, log->l_curr_cycle,
+                                       BBTOB(log->l_curr_block));
+       xlog_assign_grant_head(&log->l_write_head.grant, log->l_curr_cycle,
+                                       BBTOB(log->l_curr_block));
+}
+
 /*
  * Find the sync block number or the tail of the log.
  *
@@ -1238,22 +1330,20 @@ xlog_find_tail(
        xfs_daddr_t             *tail_blk)
 {
        xlog_rec_header_t       *rhead;
-       xlog_op_header_t        *op_head;
        char                    *offset = NULL;
        xfs_buf_t               *bp;
        int                     error;
-       xfs_daddr_t             umount_data_blk;
-       xfs_daddr_t             after_umount_blk;
        xfs_daddr_t             rhead_blk;
        xfs_lsn_t               tail_lsn;
-       int                     hblks;
        bool                    wrapped = false;
+       bool                    clean = false;
 
        /*
         * Find previous log record
         */
        if ((error = xlog_find_head(log, head_blk)))
                return error;
+       ASSERT(*head_blk < INT_MAX);
 
        bp = xlog_get_bp(log, 1);
        if (!bp)
@@ -1271,99 +1361,74 @@ xlog_find_tail(
        }
 
        /*
-        * Trim the head block back to skip over torn records. We can have
-        * multiple log I/Os in flight at any time, so we assume CRC failures
-        * back through the previous several records are torn writes and skip
-        * them.
+        * Search backwards through the log looking for the log record header
+        * block. This wraps all the way back around to the head so something is
+        * seriously wrong if we can't find it.
         */
-       ASSERT(*head_blk < INT_MAX);
-       error = xlog_verify_head(log, head_blk, tail_blk, bp, &rhead_blk,
-                                &rhead, &wrapped);
-       if (error)
-               goto done;
+       error = xlog_rseek_logrec_hdr(log, *head_blk, *head_blk, 1, bp,
+                                     &rhead_blk, &rhead, &wrapped);
+       if (error < 0)
+               return error;
+       if (!error) {
+               xfs_warn(log->l_mp, "%s: couldn't find sync record", __func__);
+               return -EIO;
+       }
+       *tail_blk = BLOCK_LSN(be64_to_cpu(rhead->h_tail_lsn));
 
        /*
-        * Reset log values according to the state of the log when we
-        * crashed.  In the case where head_blk == 0, we bump curr_cycle
-        * one because the next write starts a new cycle rather than
-        * continuing the cycle of the last good log record.  At this
-        * point we have guaranteed that all partial log records have been
-        * accounted for.  Therefore, we know that the last good log record
-        * written was complete and ended exactly on the end boundary
-        * of the physical log.
+        * Set the log state based on the current head record.
         */
-       log->l_prev_block = rhead_blk;
-       log->l_curr_block = (int)*head_blk;
-       log->l_curr_cycle = be32_to_cpu(rhead->h_cycle);
-       if (wrapped)
-               log->l_curr_cycle++;
-       atomic64_set(&log->l_tail_lsn, be64_to_cpu(rhead->h_tail_lsn));
-       atomic64_set(&log->l_last_sync_lsn, be64_to_cpu(rhead->h_lsn));
-       xlog_assign_grant_head(&log->l_reserve_head.grant, log->l_curr_cycle,
-                                       BBTOB(log->l_curr_block));
-       xlog_assign_grant_head(&log->l_write_head.grant, log->l_curr_cycle,
-                                       BBTOB(log->l_curr_block));
+       xlog_set_state(log, *head_blk, rhead, rhead_blk, wrapped);
+       tail_lsn = atomic64_read(&log->l_tail_lsn);
 
        /*
-        * Look for unmount record.  If we find it, then we know there
-        * was a clean unmount.  Since 'i' could be the last block in
-        * the physical log, we convert to a log block before comparing
-        * to the head_blk.
+        * Look for an unmount record at the head of the log. This sets the log
+        * state to determine whether recovery is necessary.
+        */
+       error = xlog_check_unmount_rec(log, head_blk, tail_blk, rhead,
+                                      rhead_blk, bp, &clean);
+       if (error)
+               goto done;
+
+       /*
+        * Verify the log head if the log is not clean (e.g., we have anything
+        * but an unmount record at the head). This uses CRC verification to
+        * detect and trim torn writes. If discovered, CRC failures are
+        * considered torn writes and the log head is trimmed accordingly.
         *
-        * Save the current tail lsn to use to pass to
-        * xlog_clear_stale_blocks() below.  We won't want to clear the
-        * unmount record if there is one, so we pass the lsn of the
-        * unmount record rather than the block after it.
+        * Note that we can only run CRC verification when the log is dirty
+        * because there's no guarantee that the log data behind an unmount
+        * record is compatible with the current architecture.
         */
-       if (xfs_sb_version_haslogv2(&log->l_mp->m_sb)) {
-               int     h_size = be32_to_cpu(rhead->h_size);
-               int     h_version = be32_to_cpu(rhead->h_version);
+       if (!clean) {
+               xfs_daddr_t     orig_head = *head_blk;
 
-               if ((h_version & XLOG_VERSION_2) &&
-                   (h_size > XLOG_HEADER_CYCLE_SIZE)) {
-                       hblks = h_size / XLOG_HEADER_CYCLE_SIZE;
-                       if (h_size % XLOG_HEADER_CYCLE_SIZE)
-                               hblks++;
-               } else {
-                       hblks = 1;
-               }
-       } else {
-               hblks = 1;
-       }
-       after_umount_blk = rhead_blk + hblks + BTOBB(be32_to_cpu(rhead->h_len));
-       after_umount_blk = do_mod(after_umount_blk, log->l_logBBsize);
-       tail_lsn = atomic64_read(&log->l_tail_lsn);
-       if (*head_blk == after_umount_blk &&
-           be32_to_cpu(rhead->h_num_logops) == 1) {
-               umount_data_blk = rhead_blk + hblks;
-               umount_data_blk = do_mod(umount_data_blk, log->l_logBBsize);
-               error = xlog_bread(log, umount_data_blk, 1, bp, &offset);
+               error = xlog_verify_head(log, head_blk, tail_blk, bp,
+                                        &rhead_blk, &rhead, &wrapped);
                if (error)
                        goto done;
 
-               op_head = (xlog_op_header_t *)offset;
-               if (op_head->oh_flags & XLOG_UNMOUNT_TRANS) {
-                       /*
-                        * Set tail and last sync so that newly written
-                        * log records will point recovery to after the
-                        * current unmount record.
-                        */
-                       xlog_assign_atomic_lsn(&log->l_tail_lsn,
-                                       log->l_curr_cycle, after_umount_blk);
-                       xlog_assign_atomic_lsn(&log->l_last_sync_lsn,
-                                       log->l_curr_cycle, after_umount_blk);
-                       *tail_blk = after_umount_blk;
-
-                       /*
-                        * Note that the unmount was clean. If the unmount
-                        * was not clean, we need to know this to rebuild the
-                        * superblock counters from the perag headers if we
-                        * have a filesystem using non-persistent counters.
-                        */
-                       log->l_mp->m_flags |= XFS_MOUNT_WAS_CLEAN;
+               /* update in-core state again if the head changed */
+               if (*head_blk != orig_head) {
+                       xlog_set_state(log, *head_blk, rhead, rhead_blk,
+                                      wrapped);
+                       tail_lsn = atomic64_read(&log->l_tail_lsn);
+                       error = xlog_check_unmount_rec(log, head_blk, tail_blk,
+                                                      rhead, rhead_blk, bp,
+                                                      &clean);
+                       if (error)
+                               goto done;
                }
        }
 
+       /*
+        * Note that the unmount was clean. If the unmount was not clean, we
+        * need to know this to rebuild the superblock counters from the perag
+        * headers if we have a filesystem using non-persistent counters.
+        */
+       if (clean)
+               log->l_mp->m_flags |= XFS_MOUNT_WAS_CLEAN;
+
        /*
         * Make sure that there are no blocks in front of the head
         * with the same cycle number as the head.  This can happen
index 5bfc61943f88a90f0b4df3d0cb04d42319a6d731..34f601e7b88d3a75f237a17664da6a253d7857c2 100644 (file)
 #define ACPI_GET_FUNCTION_NAME          _acpi_function_name
 
 /*
- * The Name parameter should be the procedure name as a quoted string.
+ * The Name parameter should be the procedure name as a non-quoted string.
  * The function name is also used by the function exit macros below.
  * Note: (const char) is used to be compatible with the debug interfaces
  * and macros such as __func__.
index c96621e87c196bf8fa6aa7c9f5033c625efc4cb2..17556979dc79bffdb26b928d6f6348edfae4d095 100644 (file)
@@ -897,11 +897,9 @@ ACPI_MSG_DEPENDENT_RETURN_VOID(ACPI_PRINTF_LIKE(3)
                                acpi_warning(const char *module_name,
                                             u32 line_number,
                                             const char *format, ...))
-ACPI_MSG_DEPENDENT_RETURN_VOID(ACPI_PRINTF_LIKE(3)
+ACPI_MSG_DEPENDENT_RETURN_VOID(ACPI_PRINTF_LIKE(1)
                                void ACPI_INTERNAL_VAR_XFACE
-                               acpi_info(const char *module_name,
-                                         u32 line_number,
-                                         const char *format, ...))
+                               acpi_info(const char *format, ...))
 ACPI_MSG_DEPENDENT_RETURN_VOID(ACPI_PRINTF_LIKE(3)
                                void ACPI_INTERNAL_VAR_XFACE
                                acpi_bios_error(const char *module_name,
index 07fb100bcc688ddc1835b958ac430182ee699128..6f1805dd5d3c60e58e648fdd2750c76bda6d7ec8 100644 (file)
@@ -9,6 +9,7 @@
 #define ACPI_PROCESSOR_CLASS           "processor"
 #define ACPI_PROCESSOR_DEVICE_NAME     "Processor"
 #define ACPI_PROCESSOR_DEVICE_HID      "ACPI0007"
+#define ACPI_PROCESSOR_CONTAINER_HID   "ACPI0010"
 
 #define ACPI_PROCESSOR_BUSY_METRIC     10
 
@@ -394,14 +395,6 @@ static inline int acpi_processor_hotplug(struct acpi_processor *pr)
 }
 #endif /* CONFIG_ACPI_PROCESSOR_IDLE */
 
-#if defined(CONFIG_PM_SLEEP) & defined(CONFIG_ACPI_PROCESSOR_IDLE)
-void acpi_processor_syscore_init(void);
-void acpi_processor_syscore_exit(void);
-#else
-static inline void acpi_processor_syscore_init(void) {}
-static inline void acpi_processor_syscore_exit(void) {}
-#endif
-
 /* in processor_thermal.c */
 int acpi_processor_get_limit_info(struct acpi_processor *pr);
 extern const struct thermal_cooling_device_ops processor_cooling_ops;
index cb68888241084e33af2d7e11753e048431cf7837..88bc64f00bb53cb01fe60ca87dfafd7c9c0f0f4a 100644 (file)
@@ -320,11 +320,6 @@ static inline void bio_get_last_bvec(struct bio *bio, struct bio_vec *bv)
        struct bvec_iter iter = bio->bi_iter;
        int idx;
 
-       if (!bio_flagged(bio, BIO_CLONED)) {
-               *bv = bio->bi_io_vec[bio->bi_vcnt - 1];
-               return;
-       }
-
        if (unlikely(!bio_multiple_segments(bio))) {
                *bv = bio_iovec(bio);
                return;
index 75857cda38e989e5a44150c5abb9bd1bd4872957..728ef074602a69d6046e6ff68f10ea84bdc172cd 100644 (file)
@@ -386,7 +386,7 @@ static inline void dma_free_attrs(struct device *dev, size_t size,
        if (dma_release_from_coherent(dev, get_order(size), cpu_addr))
                return;
 
-       if (!ops->free)
+       if (!ops->free || !cpu_addr)
                return;
 
        debug_dma_free_coherent(dev, size, cpu_addr, dma_handle);
index 0e95fcc75b2ac141ceeb3ebb3b2f79d7a5d2ddad..358076eda364ec929d5a5181c4c37663e4d78c40 100644 (file)
@@ -125,6 +125,16 @@ struct irqaction {
 
 extern irqreturn_t no_action(int cpl, void *dev_id);
 
+/*
+ * If a (PCI) device interrupt is not connected we set dev->irq to
+ * IRQ_NOTCONNECTED. This causes request_irq() to fail with -ENOTCONN, so we
+ * can distinguish that case from other error returns.
+ *
+ * 0x80000000 is guaranteed to be outside the available range of interrupts
+ * and easy to distinguish from other possible incorrect values.
+ */
+#define IRQ_NOTCONNECTED       (1U << 31)
+
 extern int __must_check
 request_threaded_irq(unsigned int irq, irq_handler_t handler,
                     irq_handler_t thread_fn,
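
The new IRQ_NOTCONNECTED comment above says request_irq() fails with -ENOTCONN when the line is not wired up. A minimal sketch of how a PCI driver could react to that return; the helper name, the IRQF_SHARED flag and the fall-back-to-polling policy are illustrative assumptions, not code from this series:

	#include <linux/interrupt.h>
	#include <linux/pci.h>

	static int example_request_line(struct pci_dev *pdev, irq_handler_t handler,
					void *ctx)
	{
		int ret;

		ret = request_irq(pdev->irq, handler, IRQF_SHARED, "example", ctx);
		if (ret == -ENOTCONN) {
			/* The line is simply not connected; fall back to polling. */
			dev_info(&pdev->dev, "IRQ not connected, using polling\n");
			return 0;
		}
		return ret;
	}
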
index 4b9f85c963d0738e20ee7c89e2dcb28ba3cc1aaa..0fdc798e3ff795a9dffd75e36e90ce2b61141562 100644 (file)
@@ -1,6 +1,7 @@
 #ifndef _LINUX_KASAN_H
 #define _LINUX_KASAN_H
 
+#include <linux/sched.h>
 #include <linux/types.h>
 
 struct kmem_cache;
@@ -13,7 +14,6 @@ struct vm_struct;
 
 #include <asm/kasan.h>
 #include <asm/pgtable.h>
-#include <linux/sched.h>
 
 extern unsigned char kasan_zero_page[PAGE_SIZE];
 extern pte_t kasan_zero_pte[PTRS_PER_PTE];
@@ -43,6 +43,8 @@ static inline void kasan_disable_current(void)
 
 void kasan_unpoison_shadow(const void *address, size_t size);
 
+void kasan_unpoison_task_stack(struct task_struct *task);
+
 void kasan_alloc_pages(struct page *page, unsigned int order);
 void kasan_free_pages(struct page *page, unsigned int order);
 
@@ -66,6 +68,8 @@ void kasan_free_shadow(const struct vm_struct *vm);
 
 static inline void kasan_unpoison_shadow(const void *address, size_t size) {}
 
+static inline void kasan_unpoison_task_stack(struct task_struct *task) {}
+
 static inline void kasan_enable_current(void) {}
 static inline void kasan_disable_current(void) {}
 
index 30cf4200ab40ee40fdae694291e36fe869b508bc..5356f4d661a721ba0446b1183e2a834f3bf3b56f 100644 (file)
@@ -113,17 +113,6 @@ extern void __list_del_entry(struct list_head *entry);
 extern void list_del(struct list_head *entry);
 #endif
 
-#ifdef CONFIG_DEBUG_LIST
-/*
- * See devm_memremap_pages() which wants DEBUG_LIST=y to assert if one
- * of the pages it allocates is ever passed to list_add()
- */
-extern void list_force_poison(struct list_head *entry);
-#else
-/* fallback to the less strict LIST_POISON* definitions */
-#define list_force_poison list_del
-#endif
-
 /**
  * list_replace - replace old entry by new one
  * @old : the element to be replaced
index 51f1e540fc2b83bf9dd143637bad8acdbc44d0fd..58eef02edc7e81ab2dcd14aace9c722b76a89623 100644 (file)
@@ -4245,7 +4245,9 @@ struct mlx5_ifc_modify_tir_bitmask_bits {
 
        u8         reserved_at_20[0x1b];
        u8         self_lb_en[0x1];
-       u8         reserved_at_3c[0x3];
+       u8         reserved_at_3c[0x1];
+       u8         hash[0x1];
+       u8         reserved_at_3e[0x1];
        u8         lro[0x1];
 };
 
index 4ce9ff7086f4897a67f6037d88df21012cf28413..d3fcd4591ce4ac1312a35710c81a9ff51211c140 100644 (file)
@@ -1985,6 +1985,30 @@ static inline void skb_reserve(struct sk_buff *skb, int len)
        skb->tail += len;
 }
 
+/**
+ *     skb_tailroom_reserve - adjust reserved_tailroom
+ *     @skb: buffer to alter
+ *     @mtu: maximum amount of headlen permitted
+ *     @needed_tailroom: minimum amount of reserved_tailroom
+ *
+ *     Set reserved_tailroom so that headlen can be as large as possible but
+ *     not larger than mtu and tailroom cannot be smaller than
+ *     needed_tailroom.
+ *     The required headroom should already have been reserved before using
+ *     this function.
+ */
+static inline void skb_tailroom_reserve(struct sk_buff *skb, unsigned int mtu,
+                                       unsigned int needed_tailroom)
+{
+       SKB_LINEAR_ASSERT(skb);
+       if (mtu < skb_tailroom(skb) - needed_tailroom)
+               /* use at most mtu */
+               skb->reserved_tailroom = skb_tailroom(skb) - mtu;
+       else
+               /* use up to all available space */
+               skb->reserved_tailroom = needed_tailroom;
+}
+
 #define ENCAP_TYPE_ETHER       0
 #define ENCAP_TYPE_IPPROTO     1
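As a usage sketch (hlen, tlen and mtu are assumed to be the caller's head/tail reservations and link MTU), the helper replaces the old open-coded reserved_tailroom computation; the igmp and mld hunks later in this section are converted to exactly this shape:

	skb = alloc_skb(size + hlen + tlen, GFP_ATOMIC);
	if (!skb)
		return NULL;

	skb_reserve(skb, hlen);			/* headroom for protocol headers */
	skb_tailroom_reserve(skb, mtu, tlen);	/* headlen <= mtu, tailroom >= tlen */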
 
index eead8ab93c0a36e402741ee767d3c3bc70128964..881a79d524675d7411b8985fd4a91110f69fd8e5 100644 (file)
@@ -100,6 +100,7 @@ struct plat_stmmacenet_data {
        int interface;
        struct stmmac_mdio_bus_data *mdio_bus_data;
        struct device_node *phy_node;
+       struct device_node *mdio_node;
        struct stmmac_dma_cfg *dma_cfg;
        int clk_csr;
        int has_gmac;
index acfdbf353a0b5bc7cfeb6ae58fa7e6a6acfa16ba..be586c632a0c04da3887ae6a0cf28357df98ef5c 100644 (file)
@@ -134,9 +134,6 @@ extern void syscall_unregfunc(void);
                void *it_func;                                          \
                void *__data;                                           \
                                                                        \
-               if (!cpu_online(raw_smp_processor_id()))                \
-                       return;                                         \
-                                                                       \
                if (!(cond))                                            \
                        return;                                         \
                prercu;                                                 \
@@ -343,15 +340,19 @@ extern void syscall_unregfunc(void);
  * "void *__data, proto" as the callback prototype.
  */
 #define DECLARE_TRACE_NOARGS(name)                                     \
-               __DECLARE_TRACE(name, void, , 1, void *__data, __data)
+       __DECLARE_TRACE(name, void, ,                                   \
+                       cpu_online(raw_smp_processor_id()),             \
+                       void *__data, __data)
 
 #define DECLARE_TRACE(name, proto, args)                               \
-               __DECLARE_TRACE(name, PARAMS(proto), PARAMS(args), 1,   \
-                               PARAMS(void *__data, proto),            \
-                               PARAMS(__data, args))
+       __DECLARE_TRACE(name, PARAMS(proto), PARAMS(args),              \
+                       cpu_online(raw_smp_processor_id()),             \
+                       PARAMS(void *__data, proto),                    \
+                       PARAMS(__data, args))
 
 #define DECLARE_TRACE_CONDITION(name, proto, args, cond)               \
-       __DECLARE_TRACE(name, PARAMS(proto), PARAMS(args), PARAMS(cond), \
+       __DECLARE_TRACE(name, PARAMS(proto), PARAMS(args),              \
+                       cpu_online(raw_smp_processor_id()) && (PARAMS(cond)), \
                        PARAMS(void *__data, proto),                    \
                        PARAMS(__data, args))
 
index 8f81bbbc38fc939070a5761e3af90da62faf8d68..e0f4109e64c6fca9ba87d768c2c7b1220a6557f4 100644 (file)
@@ -439,6 +439,12 @@ int dev_get_wireless_info(char *buffer, char **start, off_t offset, int length);
 /* Send a single event to user space */
 void wireless_send_event(struct net_device *dev, unsigned int cmd,
                         union iwreq_data *wrqu, const char *extra);
+#ifdef CONFIG_WEXT_CORE
+/* flush all previous wext events - if work is done from netdev notifiers */
+void wireless_nlevent_flush(void);
+#else
+static inline void wireless_nlevent_flush(void) {}
+#endif
 
 /* We may need a function to send a stream of events to user space.
  * More on that later... */
index 317a1ed2f4acc7745c3e02955c60e8c1dc8eb7c3..9130dd5a184a25e7d6c6cf0b633d7d5c0c4fc0f6 100644 (file)
@@ -231,13 +231,13 @@ TRACE_EVENT(snd_soc_jack_report,
        TP_ARGS(jack, mask, val),
 
        TP_STRUCT__entry(
-               __string(       name,           jack->jack->name        )
+               __string(       name,           jack->jack->id          )
                __field(        int,            mask                    )
                __field(        int,            val                     )
        ),
 
        TP_fast_assign(
-               __assign_str(name, jack->jack->name);
+               __assign_str(name, jack->jack->id);
                __entry->mask = mask;
                __entry->val = val;
        ),
@@ -253,12 +253,12 @@ TRACE_EVENT(snd_soc_jack_notify,
        TP_ARGS(jack, val),
 
        TP_STRUCT__entry(
-               __string(       name,           jack->jack->name        )
+               __string(       name,           jack->jack->id          )
                __field(        int,            val                     )
        ),
 
        TP_fast_assign(
-               __assign_str(name, jack->jack->name);
+               __assign_str(name, jack->jack->id);
                __entry->val = val;
        ),
 
index aa6f8571de136b74fba93996883bd69b3e28d412..5df4881dea7b5e8e42fc1274f967331a7d455600 100644 (file)
@@ -292,6 +292,9 @@ enum bpf_func_id {
 /* BPF_FUNC_skb_set_tunnel_key and BPF_FUNC_skb_get_tunnel_key flags. */
 #define BPF_F_TUNINFO_IPV6             (1ULL << 0)
 
+/* BPF_FUNC_skb_set_tunnel_key flags. */
+#define BPF_F_ZERO_CSUM_TX             (1ULL << 1)
+
 /* user accessible mirror of in-kernel sk_buff.
  * new fields can only be added to the end of this structure
  */
index 625b38f65764b34fe9d942339ef61a08af817564..a8e3a8c0d85a9173c7aa248b84e8e4e9fd01739f 100644 (file)
@@ -120,7 +120,7 @@ struct media_device_info {
 
 #define MEDIA_ENT_F_V4L2_SUBDEV_UNKNOWN        MEDIA_ENT_F_OLD_SUBDEV_BASE
 
-#ifndef __KERNEL__
+#if !defined(__KERNEL__) || defined(__NEED_MEDIA_LEGACY_API)
 
 /*
  * Legacy symbols used to avoid userspace compilation breakages
@@ -133,6 +133,10 @@ struct media_device_info {
 #define MEDIA_ENT_TYPE_MASK            0x00ff0000
 #define MEDIA_ENT_SUBTYPE_MASK         0x0000ffff
 
+/* End of the old subdev reserved numberspace */
+#define MEDIA_ENT_T_DEVNODE_UNKNOWN    (MEDIA_ENT_T_DEVNODE | \
+                                        MEDIA_ENT_SUBTYPE_MASK)
+
 #define MEDIA_ENT_T_DEVNODE            MEDIA_ENT_F_OLD_BASE
 #define MEDIA_ENT_T_DEVNODE_V4L                MEDIA_ENT_F_IO_V4L
 #define MEDIA_ENT_T_DEVNODE_FB         (MEDIA_ENT_T_DEVNODE + 2)
index 841187239adc8e3bb0f72fb4ab0ca6a7fec2f618..e79e60f50bce056bc9c658d24e00bdbe553a2f61 100644 (file)
@@ -1609,6 +1609,9 @@ int request_threaded_irq(unsigned int irq, irq_handler_t handler,
        struct irq_desc *desc;
        int retval;
 
+       if (irq == IRQ_NOTCONNECTED)
+               return -ENOTCONN;
+
        /*
         * Sanity-check: shared interrupts must pass in a real dev-ID,
         * otherwise we'll have trouble later trying to figure out
@@ -1699,9 +1702,13 @@ EXPORT_SYMBOL(request_threaded_irq);
 int request_any_context_irq(unsigned int irq, irq_handler_t handler,
                            unsigned long flags, const char *name, void *dev_id)
 {
-       struct irq_desc *desc = irq_to_desc(irq);
+       struct irq_desc *desc;
        int ret;
 
+       if (irq == IRQ_NOTCONNECTED)
+               return -ENOTCONN;
+
+       desc = irq_to_desc(irq);
        if (!desc)
                return -EINVAL;
 
index b981a7b023f04c356df5b852f60caeae232fdf45..6cf54615a9c45181b8efdb6b61975b490b525b8f 100644 (file)
@@ -29,10 +29,10 @@ __weak void __iomem *ioremap_cache(resource_size_t offset, unsigned long size)
 
 static void *try_ram_remap(resource_size_t offset, size_t size)
 {
-       struct page *page = pfn_to_page(offset >> PAGE_SHIFT);
+       unsigned long pfn = PHYS_PFN(offset);
 
        /* In the simple case just return the existing linear address */
-       if (!PageHighMem(page))
+       if (pfn_valid(pfn) && !PageHighMem(pfn_to_page(pfn)))
                return __va(offset);
        return NULL; /* fallback to ioremap_cache */
 }
@@ -270,13 +270,16 @@ struct dev_pagemap *find_dev_pagemap(resource_size_t phys)
 void *devm_memremap_pages(struct device *dev, struct resource *res,
                struct percpu_ref *ref, struct vmem_altmap *altmap)
 {
-       int is_ram = region_intersects(res->start, resource_size(res),
-                       "System RAM");
        resource_size_t key, align_start, align_size, align_end;
        struct dev_pagemap *pgmap;
        struct page_map *page_map;
+       int error, nid, is_ram;
        unsigned long pfn;
-       int error, nid;
+
+       align_start = res->start & ~(SECTION_SIZE - 1);
+       align_size = ALIGN(res->start + resource_size(res), SECTION_SIZE)
+               - align_start;
+       is_ram = region_intersects(align_start, align_size, "System RAM");
 
        if (is_ram == REGION_MIXED) {
                WARN_ONCE(1, "%s attempted on mixed region %pr\n",
@@ -314,8 +317,6 @@ void *devm_memremap_pages(struct device *dev, struct resource *res,
 
        mutex_lock(&pgmap_lock);
        error = 0;
-       align_start = res->start & ~(SECTION_SIZE - 1);
-       align_size = ALIGN(resource_size(res), SECTION_SIZE);
        align_end = align_start + align_size - 1;
        for (key = align_start; key <= align_end; key += SECTION_SIZE) {
                struct dev_pagemap *dup;
@@ -351,8 +352,13 @@ void *devm_memremap_pages(struct device *dev, struct resource *res,
        for_each_device_pfn(pfn, page_map) {
                struct page *page = pfn_to_page(pfn);
 
-               /* ZONE_DEVICE pages must never appear on a slab lru */
-               list_force_poison(&page->lru);
+               /*
+                * ZONE_DEVICE pages union ->lru with a ->pgmap back
+                * pointer.  It is a bug if a ZONE_DEVICE page is ever
+                * freed or placed on a driver-private list.  Seed the
+                * storage with LIST_POISON* values.
+                */
+               list_del(&page->lru);
                page->pgmap = pgmap;
        }
        devres_add(dev, page_map);
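For illustration, the alignment arithmetic introduced above can be pulled into a tiny helper; the 128 MiB section size is an assumption for the example (the real SECTION_SIZE comes from the architecture's sparsemem configuration):

#define DEMO_SECTION_SIZE	(128UL << 20)	/* assumed value, example only */

static resource_size_t demo_aligned_size(resource_size_t start,
					 resource_size_t size)
{
	resource_size_t align_start = start & ~(DEMO_SECTION_SIZE - 1);

	/* Widen to whole sections so region_intersects() also notices
	 * System RAM that merely shares a section with the range.
	 */
	return ALIGN(start + size, DEMO_SECTION_SIZE) - align_start;
}

The list_del() added in the same hunk relies on the stock list poisoning (LIST_POISON1/2 left in ->next/->prev), which is why the dedicated list_force_poison() helper removed elsewhere in this section is no longer needed.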
index 9503d590e5ef5b81537947b9925ecfe689f72a91..41f6b2215aa86b2275347ea5e25ff9db59dd933f 100644 (file)
@@ -26,6 +26,7 @@
  *              Thomas Gleixner, Mike Kravetz
  */
 
+#include <linux/kasan.h>
 #include <linux/mm.h>
 #include <linux/module.h>
 #include <linux/nmi.h>
@@ -5096,6 +5097,8 @@ void init_idle(struct task_struct *idle, int cpu)
        idle->state = TASK_RUNNING;
        idle->se.exec_start = sched_clock();
 
+       kasan_unpoison_task_stack(idle);
+
 #ifdef CONFIG_SMP
        /*
         * Its possible that init_idle() gets called multiple times on a task,
index 3345a089ef7b954d475d67965b8ce891c1b16ab0..3859bf63561c63936947b007fe3ee20e822509a1 100644 (file)
 #include <linux/kernel.h>
 #include <linux/rculist.h>
 
-static struct list_head force_poison;
-void list_force_poison(struct list_head *entry)
-{
-       entry->next = &force_poison;
-       entry->prev = &force_poison;
-}
-
 /*
  * Insert a new entry between two known consecutive entries.
  *
@@ -30,8 +23,6 @@ void __list_add(struct list_head *new,
                              struct list_head *prev,
                              struct list_head *next)
 {
-       WARN(new->next == &force_poison || new->prev == &force_poison,
-               "list_add attempted on force-poisoned entry\n");
        WARN(next->prev != prev,
                "list_add corruption. next->prev should be "
                "prev (%p), but was %p. (next=%p).\n",
index 3461d97ecb30bfe18d1ed50729e5147aa2ad7e6d..da7a35d83de7edc4ed562caa5e1925194f2c7229 100644 (file)
@@ -195,6 +195,30 @@ void __delete_from_page_cache(struct page *page, void *shadow,
        else
                cleancache_invalidate_page(mapping, page);
 
+       VM_BUG_ON_PAGE(page_mapped(page), page);
+       if (!IS_ENABLED(CONFIG_DEBUG_VM) && unlikely(page_mapped(page))) {
+               int mapcount;
+
+               pr_alert("BUG: Bad page cache in process %s  pfn:%05lx\n",
+                        current->comm, page_to_pfn(page));
+               dump_page(page, "still mapped when deleted");
+               dump_stack();
+               add_taint(TAINT_BAD_PAGE, LOCKDEP_NOW_UNRELIABLE);
+
+               mapcount = page_mapcount(page);
+               if (mapping_exiting(mapping) &&
+                   page_count(page) >= mapcount + 2) {
+                       /*
+                        * All vmas have already been torn down, so it's
+                        * a good bet that actually the page is unmapped,
+                        * and we'd prefer not to leak it: if we're wrong,
+                        * some other bad page check should catch it later.
+                        */
+                       page_mapcount_reset(page);
+                       atomic_sub(mapcount, &page->_count);
+               }
+       }
+
        page_cache_tree_delete(mapping, page, shadow);
 
        page->mapping = NULL;
@@ -205,7 +229,6 @@ void __delete_from_page_cache(struct page *page, void *shadow,
                __dec_zone_page_state(page, NR_FILE_PAGES);
        if (PageSwapBacked(page))
                __dec_zone_page_state(page, NR_SHMEM);
-       VM_BUG_ON_PAGE(page_mapped(page), page);
 
        /*
         * At this point page must be either written or cleaned by truncate.
index 01f2b48c8618a9f973eeb11f2162b75bb8cf67d4..aefba5a9cc47f74880dbbb3491a8927fcf999b06 100644 (file)
@@ -2751,7 +2751,7 @@ static int hugetlb_sysctl_handler_common(bool obey_mempolicy,
        int ret;
 
        if (!hugepages_supported())
-               return -ENOTSUPP;
+               return -EOPNOTSUPP;
 
        table->data = &tmp;
        table->maxlen = sizeof(unsigned long);
@@ -2792,7 +2792,7 @@ int hugetlb_overcommit_handler(struct ctl_table *table, int write,
        int ret;
 
        if (!hugepages_supported())
-               return -ENOTSUPP;
+               return -EOPNOTSUPP;
 
        tmp = h->nr_overcommit_huge_pages;
 
@@ -3502,7 +3502,7 @@ static int hugetlb_no_page(struct mm_struct *mm, struct vm_area_struct *vma,
         * COW. Warn that such a situation has occurred as it may not be obvious
         */
        if (is_vma_resv_set(vma, HPAGE_RESV_UNMAPPED)) {
-               pr_warning("PID %d killed due to inadequate hugepage pool\n",
+               pr_warn_ratelimited("PID %d killed due to inadequate hugepage pool\n",
                           current->pid);
                return ret;
        }
index bc0a8d8b8f42faf7bca01501425ba73e156ee293..1ad20ade8c91328d2f47e8c116f9aca6bc2fb57f 100644 (file)
@@ -20,6 +20,7 @@
 #include <linux/init.h>
 #include <linux/kernel.h>
 #include <linux/kmemleak.h>
+#include <linux/linkage.h>
 #include <linux/memblock.h>
 #include <linux/memory.h>
 #include <linux/mm.h>
@@ -60,6 +61,25 @@ void kasan_unpoison_shadow(const void *address, size_t size)
        }
 }
 
+static void __kasan_unpoison_stack(struct task_struct *task, void *sp)
+{
+       void *base = task_stack_page(task);
+       size_t size = sp - base;
+
+       kasan_unpoison_shadow(base, size);
+}
+
+/* Unpoison the entire stack for a task. */
+void kasan_unpoison_task_stack(struct task_struct *task)
+{
+       __kasan_unpoison_stack(task, task_stack_page(task) + THREAD_SIZE);
+}
+
+/* Unpoison the stack for the current task beyond a watermark sp value. */
+asmlinkage void kasan_unpoison_remaining_stack(void *sp)
+{
+       __kasan_unpoison_stack(current, sp);
+}
 
 /*
  * All functions below always inlined so compiler could
index 4c4187c0e1deeb25bdbf5eb383132d27feb9be90..9a3f6b90e6283b24bff091213768612b04b6d928 100644 (file)
@@ -532,7 +532,7 @@ retry:
                nid = page_to_nid(page);
                if (node_isset(nid, *qp->nmask) == !!(flags & MPOL_MF_INVERT))
                        continue;
-               if (PageTail(page) && PageAnon(page)) {
+               if (PageTransCompound(page) && PageAnon(page)) {
                        get_page(page);
                        pte_unmap_unlock(pte, ptl);
                        lock_page(page);
index 004d42b1dfaf928ab174e057696afa580447f3d9..7924f4f58a6d48ae5335fdedb1c2e2dd93a56d88 100644 (file)
@@ -135,8 +135,8 @@ static void *remove_element(mempool_t *pool)
        void *element = pool->elements[--pool->curr_nr];
 
        BUG_ON(pool->curr_nr < 0);
-       check_element(pool, element);
        kasan_unpoison_element(pool, element);
+       check_element(pool, element);
        return element;
 }
 
index 82e3e97050173542b5d6094ebc4db81324198b18..dcea4f4c62b3b43d701fb68a74a6aa1a1e0aa8cf 100644 (file)
@@ -723,6 +723,8 @@ int br_fdb_dump(struct sk_buff *skb,
                struct net_bridge_fdb_entry *f;
 
                hlist_for_each_entry_rcu(f, &br->hash[i], hlist) {
+                       int err;
+
                        if (idx < cb->args[0])
                                goto skip;
 
@@ -741,12 +743,15 @@ int br_fdb_dump(struct sk_buff *skb,
                        if (!filter_dev && f->dst)
                                goto skip;
 
-                       if (fdb_fill_info(skb, br, f,
-                                         NETLINK_CB(cb->skb).portid,
-                                         cb->nlh->nlmsg_seq,
-                                         RTM_NEWNEIGH,
-                                         NLM_F_MULTI) < 0)
+                       err = fdb_fill_info(skb, br, f,
+                                           NETLINK_CB(cb->skb).portid,
+                                           cb->nlh->nlmsg_seq,
+                                           RTM_NEWNEIGH,
+                                           NLM_F_MULTI);
+                       if (err < 0) {
+                               cb->args[1] = err;
                                break;
+                       }
 skip:
                        ++idx;
                }
index 94d26201080d6671080f63865994bb41d7d3d8bc..bba502f7cd575692a9ed795f83d2fdedf2428ed6 100644 (file)
@@ -1752,7 +1752,7 @@ static u64 bpf_skb_set_tunnel_key(u64 r1, u64 r2, u64 size, u64 flags, u64 r5)
        u8 compat[sizeof(struct bpf_tunnel_key)];
        struct ip_tunnel_info *info;
 
-       if (unlikely(flags & ~(BPF_F_TUNINFO_IPV6)))
+       if (unlikely(flags & ~(BPF_F_TUNINFO_IPV6 | BPF_F_ZERO_CSUM_TX)))
                return -EINVAL;
        if (unlikely(size != sizeof(struct bpf_tunnel_key))) {
                switch (size) {
@@ -1776,7 +1776,7 @@ static u64 bpf_skb_set_tunnel_key(u64 r1, u64 r2, u64 size, u64 flags, u64 r5)
        info = &md->u.tun_info;
        info->mode = IP_TUNNEL_INFO_TX;
 
-       info->key.tun_flags = TUNNEL_KEY;
+       info->key.tun_flags = TUNNEL_KEY | TUNNEL_CSUM;
        info->key.tun_id = cpu_to_be64(from->tunnel_id);
        info->key.tos = from->tunnel_tos;
        info->key.ttl = from->tunnel_ttl;
@@ -1787,6 +1787,8 @@ static u64 bpf_skb_set_tunnel_key(u64 r1, u64 r2, u64 size, u64 flags, u64 r5)
                       sizeof(from->remote_ipv6));
        } else {
                info->key.u.ipv4.dst = cpu_to_be32(from->remote_ipv4);
+               if (flags & BPF_F_ZERO_CSUM_TX)
+                       info->key.tun_flags &= ~TUNNEL_CSUM;
        }
 
        return 0;
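From a BPF program's point of view the new flag is simply OR-ed into the flags argument of the helper. A hedged fragment for a tc classifier (field values are made up; the usual bpf_helpers.h prototypes are assumed):

	struct bpf_tunnel_key key = {};

	key.tunnel_id   = 42;
	key.remote_ipv4 = 0x0a000001;	/* 10.0.0.1, example peer */

	/* Request zero UDP checksums on IPv4 tunnel transmit */
	bpf_skb_set_tunnel_key(skb, &key, sizeof(key), BPF_F_ZERO_CSUM_TX);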
index d735e854f916040912fb12930cbc6a7950ace942..8261d95dd846647798c7dba8f1dbfa4cf61a0eef 100644 (file)
@@ -2911,6 +2911,7 @@ int ndo_dflt_fdb_dump(struct sk_buff *skb,
        nlmsg_populate_fdb(skb, cb, dev, &idx, &dev->mc);
 out:
        netif_addr_unlock_bh(dev);
+       cb->args[1] = err;
        return idx;
 }
 EXPORT_SYMBOL(ndo_dflt_fdb_dump);
@@ -2944,6 +2945,7 @@ static int rtnl_fdb_dump(struct sk_buff *skb, struct netlink_callback *cb)
                ops = br_dev->netdev_ops;
        }
 
+       cb->args[1] = 0;
        for_each_netdev(net, dev) {
                if (brport_idx && (dev->ifindex != brport_idx))
                        continue;
@@ -2971,12 +2973,16 @@ static int rtnl_fdb_dump(struct sk_buff *skb, struct netlink_callback *cb)
                                idx = cops->ndo_fdb_dump(skb, cb, br_dev, dev,
                                                         idx);
                }
+               if (cb->args[1] == -EMSGSIZE)
+                       break;
 
                if (dev->netdev_ops->ndo_fdb_dump)
                        idx = dev->netdev_ops->ndo_fdb_dump(skb, cb, dev, NULL,
                                                            idx);
                else
                        idx = ndo_dflt_fdb_dump(skb, cb, dev, NULL, idx);
+               if (cb->args[1] == -EMSGSIZE)
+                       break;
 
                cops = NULL;
        }
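Other ndo_fdb_dump implementations can follow the same convention; a hypothetical driver (all names assumed) reports its last fill error through cb->args[1] so the loop above can stop once the netlink skb is full:

static int demo_ndo_fdb_dump(struct sk_buff *skb, struct netlink_callback *cb,
			     struct net_device *dev,
			     struct net_device *filter_dev, int idx)
{
	int err = 0;

	/*
	 * ... walk the hardware FDB here, emitting one RTM_NEWNEIGH per
	 * entry and setting err (typically -EMSGSIZE) when a fill fails ...
	 */

	cb->args[1] = err;
	return idx;
}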
index 5bf88f58bee7405ff65f80487a64339b92a91bcb..8616d1147c938808da752734b6cecea260b149b9 100644 (file)
@@ -2947,6 +2947,24 @@ int skb_append_pagefrags(struct sk_buff *skb, struct page *page,
 }
 EXPORT_SYMBOL_GPL(skb_append_pagefrags);
 
+/**
+ *     skb_push_rcsum - push skb and update receive checksum
+ *     @skb: buffer to update
+ *     @len: length of data pushed
+ *
+ *     This function performs an skb_push on the packet and updates
+ *     the CHECKSUM_COMPLETE checksum.  It should be used on
+ *     receive path processing instead of skb_push unless you know
+ *     that the checksum difference is zero (e.g., a valid IP header)
+ *     or you are setting ip_summed to CHECKSUM_NONE.
+ */
+static unsigned char *skb_push_rcsum(struct sk_buff *skb, unsigned len)
+{
+       skb_push(skb, len);
+       skb_postpush_rcsum(skb, skb->data, len);
+       return skb->data;
+}
+
 /**
  *     skb_pull_rcsum - pull skb and update receive checksum
  *     @skb: buffer to update
@@ -4084,9 +4102,9 @@ struct sk_buff *skb_checksum_trimmed(struct sk_buff *skb,
        if (!pskb_may_pull(skb_chk, offset))
                goto err;
 
-       __skb_pull(skb_chk, offset);
+       skb_pull_rcsum(skb_chk, offset);
        ret = skb_chkf(skb_chk);
-       __skb_push(skb_chk, offset);
+       skb_push_rcsum(skb_chk, offset);
 
        if (ret)
                goto err;
index 05e4cba14162f3583ec588657af7e8b68546b111..b3086cf2702759d1077cb6a014afbd130d8206db 100644 (file)
@@ -356,9 +356,8 @@ static struct sk_buff *igmpv3_newpack(struct net_device *dev, unsigned int mtu)
        skb_dst_set(skb, &rt->dst);
        skb->dev = dev;
 
-       skb->reserved_tailroom = skb_end_offset(skb) -
-                                min(mtu, skb_end_offset(skb));
        skb_reserve(skb, hlen);
+       skb_tailroom_reserve(skb, mtu, tlen);
 
        skb_reset_network_header(skb);
        pip = ip_hdr(skb);
index 64878efa045c132d1ce511c326c2da04fc77a895..565bf64b2b7d6047a29e69df00fb3b85ec84f06a 100644 (file)
@@ -1236,13 +1236,16 @@ ssize_t ip_append_page(struct sock *sk, struct flowi4 *fl4, struct page *page,
        if (!skb)
                return -EINVAL;
 
-       cork->length += size;
        if ((size + skb->len > mtu) &&
            (sk->sk_protocol == IPPROTO_UDP) &&
            (rt->dst.dev->features & NETIF_F_UFO)) {
+               if (skb->ip_summed != CHECKSUM_PARTIAL)
+                       return -EOPNOTSUPP;
+
                skb_shinfo(skb)->gso_size = mtu - fragheaderlen;
                skb_shinfo(skb)->gso_type = SKB_GSO_UDP;
        }
+       cork->length += size;
 
        while (size > 0) {
                if (skb_is_gso(skb)) {
index 89e8861e05fcb1d371c371c1784b89499b69d9df..336e6892a93ce99aabf1794133c9567ee0effde7 100644 (file)
@@ -661,6 +661,8 @@ void ip_tunnel_xmit(struct sk_buff *skb, struct net_device *dev,
        inner_iph = (const struct iphdr *)skb_inner_network_header(skb);
        connected = (tunnel->parms.iph.daddr != 0);
 
+       memset(&(IPCB(skb)->opt), 0, sizeof(IPCB(skb)->opt));
+
        dst = tnl_params->daddr;
        if (dst == 0) {
                /* NBMA tunnel */
@@ -758,7 +760,6 @@ void ip_tunnel_xmit(struct sk_buff *skb, struct net_device *dev,
                                tunnel->err_time + IPTUNNEL_ERR_TIMEO)) {
                        tunnel->err_count--;
 
-                       memset(IPCB(skb), 0, sizeof(*IPCB(skb)));
                        dst_link_failure(skb);
                } else
                        tunnel->err_count = 0;
index c8cbc2b4b7921fb4f70681e4ac6d945f5499654c..a726d7853ce53fe03b48b199ca909c02393cabcf 100644 (file)
@@ -550,7 +550,7 @@ reset:
         */
        if (crtt > tp->srtt_us) {
                /* Set RTO like tcp_rtt_estimator(), but from cached RTT. */
-               crtt /= 8 * USEC_PER_MSEC;
+               crtt /= 8 * USEC_PER_SEC / HZ;
                inet_csk(sk)->icsk_rto = crtt + max(2 * crtt, tcp_rto_min(sk));
        } else if (tp->srtt_us == 0) {
                /* RFC6298: 5.7 We've failed to get a valid RTT sample from
index 75632a92582425db63f1078c223cbd54a91fa0c3..9b02af2139d3d3245ea952f643fc52e0ee92a9ab 100644 (file)
@@ -455,7 +455,7 @@ struct sock *tcp_create_openreq_child(const struct sock *sk,
 
                newtp->rcv_wup = newtp->copied_seq =
                newtp->rcv_nxt = treq->rcv_isn + 1;
-               newtp->segs_in = 0;
+               newtp->segs_in = 1;
 
                newtp->snd_sml = newtp->snd_una =
                newtp->snd_nxt = newtp->snd_up = treq->snt_isn + 1;
@@ -815,6 +815,7 @@ int tcp_child_process(struct sock *parent, struct sock *child,
        int ret = 0;
        int state = child->sk_state;
 
+       tcp_sk(child)->segs_in += max_t(u16, 1, skb_shinfo(skb)->gso_segs);
        if (!sock_owned_by_user(child)) {
                ret = tcp_rcv_state_process(child, skb);
                /* Wakeup parent, send SIGIO */
index 0ec08814f37d9e904b5f85bbd86cd2fa607c4307..96599d1a13184d4ef6d34ae9bff7833420d0ad93 100644 (file)
@@ -89,6 +89,8 @@ void udp_tunnel_xmit_skb(struct rtable *rt, struct sock *sk, struct sk_buff *skb
        uh->source = src_port;
        uh->len = htons(skb->len);
 
+       memset(&(IPCB(skb)->opt), 0, sizeof(IPCB(skb)->opt));
+
        udp_set_csum(nocheck, skb, src, dst, skb->len);
 
        iptunnel_xmit(sk, rt, skb, src, dst, IPPROTO_UDP, tos, ttl, df, xnet);
index 5c5d23e59da598995ff962d069d1e7b6886e31d6..9508a20fbf61432f561202edbe40b59e63c3489e 100644 (file)
@@ -257,7 +257,11 @@ int ipv6_find_hdr(const struct sk_buff *skb, unsigned int *offset,
                                                *fragoff = _frag_off;
                                        return hp->nexthdr;
                                }
-                               return -ENOENT;
+                               if (!found)
+                                       return -ENOENT;
+                               if (fragoff)
+                                       *fragoff = _frag_off;
+                               break;
                        }
                        hdrlen = 8;
                } else if (nexthdr == NEXTHDR_AUTH) {
index a69aad1e29d1ebb2429650cc083f0e4dd2bcaf86..c0d4dc1c5ea4c31d8ebb4512bb55fcd177ce9f4e 100644 (file)
@@ -777,6 +777,8 @@ static inline int ip6gre_xmit_ipv4(struct sk_buff *skb, struct net_device *dev)
        __u32 mtu;
        int err;
 
+       memset(&(IPCB(skb)->opt), 0, sizeof(IPCB(skb)->opt));
+
        if (!(t->parms.flags & IP6_TNL_F_IGN_ENCAP_LIMIT))
                encap_limit = t->parms.encap_limit;
 
index 137fca42aaa6bb809d46e7809b240d8810d89a04..6c5dfec7a3779601eb760eeeb975ae5e6db00bb7 100644 (file)
@@ -1180,6 +1180,8 @@ ip4ip6_tnl_xmit(struct sk_buff *skb, struct net_device *dev)
        u8 tproto;
        int err;
 
+       memset(&(IPCB(skb)->opt), 0, sizeof(IPCB(skb)->opt));
+
        tproto = ACCESS_ONCE(t->parms.proto);
        if (tproto != IPPROTO_IPIP && tproto != 0)
                return -1;
index 5ee56d0a8699e22434d5ad8b4380f890fa8d2126..d64ee7e8366492a439ab70ea7487bd3669800097 100644 (file)
@@ -1574,9 +1574,8 @@ static struct sk_buff *mld_newpack(struct inet6_dev *idev, unsigned int mtu)
                return NULL;
 
        skb->priority = TC_PRIO_CONTROL;
-       skb->reserved_tailroom = skb_end_offset(skb) -
-                                min(mtu, skb_end_offset(skb));
        skb_reserve(skb, hlen);
+       skb_tailroom_reserve(skb, mtu, tlen);
 
        if (__ipv6_get_lladdr(idev, &addr_buf, IFA_F_TENTATIVE)) {
                /* <draft-ietf-magma-mld-source-05.txt>:
index 22e28a44e3c88c6e52ff59ea1c7dd8a3e964e701..422dd014aa2ce9b27263eed4612ff156271d982f 100644 (file)
@@ -962,11 +962,9 @@ int __udp6_lib_rcv(struct sk_buff *skb, struct udp_table *udptable,
                ret = udpv6_queue_rcv_skb(sk, skb);
                sock_put(sk);
 
-               /* a return value > 0 means to resubmit the input, but
-                * it wants the return to be -protocol, or 0
-                */
+               /* a return value > 0 means to resubmit the input */
                if (ret > 0)
-                       return -ret;
+                       return ret;
 
                return 0;
        }
index 10ad4ac1fa0ba8ebd04e793595fab47d326194ce..367784be5df20f26fd3940c4608f1ea715bfddd6 100644 (file)
@@ -291,7 +291,7 @@ void __ieee80211_start_rx_ba_session(struct sta_info *sta,
        }
 
        /* prepare A-MPDU MLME for Rx aggregation */
-       tid_agg_rx = kmalloc(sizeof(struct tid_ampdu_rx), GFP_KERNEL);
+       tid_agg_rx = kzalloc(sizeof(*tid_agg_rx), GFP_KERNEL);
        if (!tid_agg_rx)
                goto end;
 
index b84f6aa32c0831ce200f286af2ecd947ef0fefde..f006f4a44c0e6fbcdec025dcaa5450d194403bd9 100644 (file)
@@ -92,7 +92,7 @@ struct ieee80211_fragment_entry {
        u16 extra_len;
        u16 last_frag;
        u8 rx_queue;
-       bool ccmp; /* Whether fragments were encrypted with CCMP */
+       bool check_sequential_pn; /* needed for CCMP/GCMP */
        u8 last_pn[6]; /* PN of the last fragment if CCMP was used */
 };
 
index 3ece7d1034c81ae8749cada074fbebecbe06d57f..b54f398cda5d0e2d561f2383f1d049a5410fcf68 100644 (file)
@@ -711,7 +711,7 @@ static u32 minstrel_get_expected_throughput(void *priv_sta)
         * computing cur_tp
         */
        tmp_mrs = &mi->r[idx].stats;
-       tmp_cur_tp = minstrel_get_tp_avg(&mi->r[idx], tmp_mrs->prob_ewma);
+       tmp_cur_tp = minstrel_get_tp_avg(&mi->r[idx], tmp_mrs->prob_ewma) * 10;
        tmp_cur_tp = tmp_cur_tp * 1200 * 8 / 1024;
 
        return tmp_cur_tp;
index 3928dbd24e257e68627aa977cc54a19aaa996339..370d677b547b2aa2591bea4302e4994bddfe2649 100644 (file)
@@ -414,15 +414,16 @@ minstrel_ht_set_best_prob_rate(struct minstrel_ht_sta *mi, u16 index)
            (max_tp_group != MINSTREL_CCK_GROUP))
                return;
 
+       max_gpr_group = mg->max_group_prob_rate / MCS_GROUP_RATES;
+       max_gpr_idx = mg->max_group_prob_rate % MCS_GROUP_RATES;
+       max_gpr_prob = mi->groups[max_gpr_group].rates[max_gpr_idx].prob_ewma;
+
        if (mrs->prob_ewma > MINSTREL_FRAC(75, 100)) {
                cur_tp_avg = minstrel_ht_get_tp_avg(mi, cur_group, cur_idx,
                                                    mrs->prob_ewma);
                if (cur_tp_avg > tmp_tp_avg)
                        mi->max_prob_rate = index;
 
-               max_gpr_group = mg->max_group_prob_rate / MCS_GROUP_RATES;
-               max_gpr_idx = mg->max_group_prob_rate % MCS_GROUP_RATES;
-               max_gpr_prob = mi->groups[max_gpr_group].rates[max_gpr_idx].prob_ewma;
                max_gpr_tp_avg = minstrel_ht_get_tp_avg(mi, max_gpr_group,
                                                        max_gpr_idx,
                                                        max_gpr_prob);
@@ -431,7 +432,7 @@ minstrel_ht_set_best_prob_rate(struct minstrel_ht_sta *mi, u16 index)
        } else {
                if (mrs->prob_ewma > tmp_prob)
                        mi->max_prob_rate = index;
-               if (mrs->prob_ewma > mg->rates[mg->max_group_prob_rate].prob_ewma)
+               if (mrs->prob_ewma > max_gpr_prob)
                        mg->max_group_prob_rate = index;
        }
 }
@@ -691,7 +692,7 @@ minstrel_aggr_check(struct ieee80211_sta *pubsta, struct sk_buff *skb)
        if (likely(sta->ampdu_mlme.tid_tx[tid]))
                return;
 
-       ieee80211_start_tx_ba_session(pubsta, tid, 5000);
+       ieee80211_start_tx_ba_session(pubsta, tid, 0);
 }
 
 static void
@@ -871,7 +872,7 @@ minstrel_ht_set_rate(struct minstrel_priv *mp, struct minstrel_ht_sta *mi,
         *  - if station is in dynamic SMPS (and streams > 1)
         *  - for fallback rates, to increase chances of getting through
         */
-       if (offset > 0 &&
+       if (offset > 0 ||
            (mi->sta->smps_mode == IEEE80211_SMPS_DYNAMIC &&
             group->streams > 1)) {
                ratetbl->rate[offset].count = ratetbl->rate[offset].count_rts;
@@ -1334,7 +1335,8 @@ static u32 minstrel_ht_get_expected_throughput(void *priv_sta)
        prob = mi->groups[i].rates[j].prob_ewma;
 
        /* convert tp_avg from pkt per second in kbps */
-       tp_avg = minstrel_ht_get_tp_avg(mi, i, j, prob) * AVG_PKT_SIZE * 8 / 1024;
+       tp_avg = minstrel_ht_get_tp_avg(mi, i, j, prob) * 10;
+       tp_avg = tp_avg * AVG_PKT_SIZE * 8 / 1024;
 
        return tp_avg;
 }
index bc081850ac0e5538241bd1cab37b65aeefd2c244..60d093f40f1d16de32b1bb962b25ddb12f9a1b72 100644 (file)
@@ -1753,7 +1753,7 @@ ieee80211_reassemble_add(struct ieee80211_sub_if_data *sdata,
        entry->seq = seq;
        entry->rx_queue = rx_queue;
        entry->last_frag = frag;
-       entry->ccmp = 0;
+       entry->check_sequential_pn = false;
        entry->extra_len = 0;
 
        return entry;
@@ -1849,15 +1849,27 @@ ieee80211_rx_h_defragment(struct ieee80211_rx_data *rx)
                                                 rx->seqno_idx, &(rx->skb));
                if (rx->key &&
                    (rx->key->conf.cipher == WLAN_CIPHER_SUITE_CCMP ||
-                    rx->key->conf.cipher == WLAN_CIPHER_SUITE_CCMP_256) &&
+                    rx->key->conf.cipher == WLAN_CIPHER_SUITE_CCMP_256 ||
+                    rx->key->conf.cipher == WLAN_CIPHER_SUITE_GCMP ||
+                    rx->key->conf.cipher == WLAN_CIPHER_SUITE_GCMP_256) &&
                    ieee80211_has_protected(fc)) {
                        int queue = rx->security_idx;
-                       /* Store CCMP PN so that we can verify that the next
-                        * fragment has a sequential PN value. */
-                       entry->ccmp = 1;
+
+                       /* Store CCMP/GCMP PN so that we can verify that the
+                        * next fragment has a sequential PN value.
+                        */
+                       entry->check_sequential_pn = true;
                        memcpy(entry->last_pn,
                               rx->key->u.ccmp.rx_pn[queue],
                               IEEE80211_CCMP_PN_LEN);
+                       BUILD_BUG_ON(offsetof(struct ieee80211_key,
+                                             u.ccmp.rx_pn) !=
+                                    offsetof(struct ieee80211_key,
+                                             u.gcmp.rx_pn));
+                       BUILD_BUG_ON(sizeof(rx->key->u.ccmp.rx_pn[queue]) !=
+                                    sizeof(rx->key->u.gcmp.rx_pn[queue]));
+                       BUILD_BUG_ON(IEEE80211_CCMP_PN_LEN !=
+                                    IEEE80211_GCMP_PN_LEN);
                }
                return RX_QUEUED;
        }
@@ -1872,15 +1884,21 @@ ieee80211_rx_h_defragment(struct ieee80211_rx_data *rx)
                return RX_DROP_MONITOR;
        }
 
-       /* Verify that MPDUs within one MSDU have sequential PN values.
-        * (IEEE 802.11i, 8.3.3.4.5) */
-       if (entry->ccmp) {
+       /* "The receiver shall discard MSDUs and MMPDUs whose constituent
+        *  MPDU PN values are not incrementing in steps of 1."
+        * see IEEE P802.11-REVmc/D5.0, 12.5.3.4.4, item d (for CCMP)
+        * and IEEE P802.11-REVmc/D5.0, 12.5.5.4.4, item d (for GCMP)
+        */
+       if (entry->check_sequential_pn) {
                int i;
                u8 pn[IEEE80211_CCMP_PN_LEN], *rpn;
                int queue;
+
                if (!rx->key ||
                    (rx->key->conf.cipher != WLAN_CIPHER_SUITE_CCMP &&
-                    rx->key->conf.cipher != WLAN_CIPHER_SUITE_CCMP_256))
+                    rx->key->conf.cipher != WLAN_CIPHER_SUITE_CCMP_256 &&
+                    rx->key->conf.cipher != WLAN_CIPHER_SUITE_GCMP &&
+                    rx->key->conf.cipher != WLAN_CIPHER_SUITE_GCMP_256))
                        return RX_DROP_UNUSABLE;
                memcpy(pn, entry->last_pn, IEEE80211_CCMP_PN_LEN);
                for (i = IEEE80211_CCMP_PN_LEN - 1; i >= 0; i--) {
@@ -3366,6 +3384,7 @@ static bool ieee80211_accept_frame(struct ieee80211_rx_data *rx)
                                return false;
                        /* ignore action frames to TDLS-peers */
                        if (ieee80211_is_action(hdr->frame_control) &&
+                           !is_broadcast_ether_addr(bssid) &&
                            !ether_addr_equal(bssid, hdr->addr1))
                                return false;
                }
index d05869646515dbabeb352361c9d58b9ccfc687d5..6b70399ab78121e723efc439b27122f18b52a206 100644 (file)
@@ -62,6 +62,7 @@ static void ipt_destroy_target(struct xt_entry_target *t)
        struct xt_tgdtor_param par = {
                .target   = t->u.kernel.target,
                .targinfo = t->data,
+               .family   = NFPROTO_IPV4,
        };
        if (par.target->destroy != NULL)
                par.target->destroy(&par);
@@ -195,6 +196,7 @@ static int tcf_ipt(struct sk_buff *skb, const struct tc_action *a,
        par.hooknum  = ipt->tcfi_hook;
        par.target   = ipt->tcfi_t->u.kernel.target;
        par.targinfo = ipt->tcfi_t->data;
+       par.family   = NFPROTO_IPV4;
        ret = par.target->target(skb, &par);
 
        switch (ret) {
index ec529121f38a03c4b3e2317249cd0694fd619d3f..ce46f1c7f133ad5b114e4c2cd571d26c2b9ee901 100644 (file)
@@ -526,6 +526,8 @@ static int sctp_v6_cmp_addr(const union sctp_addr *addr1,
                }
                return 0;
        }
+       if (addr1->v6.sin6_port != addr2->v6.sin6_port)
+               return 0;
        if (!ipv6_addr_equal(&addr1->v6.sin6_addr, &addr2->v6.sin6_addr))
                return 0;
        /* If this is a linklocal address, compare the scope_id. */
index ded7d931a6a5b218c7bbd4481acdea1d2bc29325..963dffcc2618b0fa5d186460bf12960c21f9babe 100644 (file)
@@ -482,7 +482,7 @@ static void sctp_remaddr_seq_stop(struct seq_file *seq, void *v)
 static int sctp_remaddr_seq_show(struct seq_file *seq, void *v)
 {
        struct sctp_association *assoc;
-       struct sctp_transport *tsp;
+       struct sctp_transport *transport, *tsp;
 
        if (v == SEQ_START_TOKEN) {
                seq_printf(seq, "ADDR ASSOC_ID HB_ACT RTO MAX_PATH_RTX "
@@ -490,10 +490,10 @@ static int sctp_remaddr_seq_show(struct seq_file *seq, void *v)
                return 0;
        }
 
-       tsp = (struct sctp_transport *)v;
-       if (!sctp_transport_hold(tsp))
+       transport = (struct sctp_transport *)v;
+       if (!sctp_transport_hold(transport))
                return 0;
-       assoc = tsp->asoc;
+       assoc = transport->asoc;
 
        list_for_each_entry_rcu(tsp, &assoc->peer.transport_addr_list,
                                transports) {
@@ -546,7 +546,7 @@ static int sctp_remaddr_seq_show(struct seq_file *seq, void *v)
                seq_printf(seq, "\n");
        }
 
-       sctp_transport_put(tsp);
+       sctp_transport_put(transport);
 
        return 0;
 }
index 47f7da58a7f0e93e3ffd632396a5a8db65c7f7c3..8b5833c1ff2e8695030587749c8c72d86d334323 100644 (file)
@@ -1093,8 +1093,11 @@ int switchdev_port_fdb_dump(struct sk_buff *skb, struct netlink_callback *cb,
                .cb = cb,
                .idx = idx,
        };
+       int err;
 
-       switchdev_port_obj_dump(dev, &dump.fdb.obj, switchdev_port_fdb_dump_cb);
+       err = switchdev_port_obj_dump(dev, &dump.fdb.obj,
+                                     switchdev_port_fdb_dump_cb);
+       cb->args[1] = err;
        return dump.idx;
 }
 EXPORT_SYMBOL_GPL(switchdev_port_fdb_dump);
index 69c29050f14abe8c926db671dfa05c96f2ddc3bc..4d420bb273960cd6eac206753f25435970f2724d 100644 (file)
@@ -673,7 +673,7 @@ static int tipc_sendmcast(struct  socket *sock, struct tipc_name_seq *seq,
        struct tipc_sock *tsk = tipc_sk(sk);
        struct net *net = sock_net(sk);
        struct tipc_msg *mhdr = &tsk->phdr;
-       struct sk_buff_head *pktchain = &sk->sk_write_queue;
+       struct sk_buff_head pktchain;
        struct iov_iter save = msg->msg_iter;
        uint mtu;
        int rc;
@@ -687,14 +687,16 @@ static int tipc_sendmcast(struct  socket *sock, struct tipc_name_seq *seq,
        msg_set_nameupper(mhdr, seq->upper);
        msg_set_hdr_sz(mhdr, MCAST_H_SIZE);
 
+       skb_queue_head_init(&pktchain);
+
 new_mtu:
        mtu = tipc_bcast_get_mtu(net);
-       rc = tipc_msg_build(mhdr, msg, 0, dsz, mtu, pktchain);
+       rc = tipc_msg_build(mhdr, msg, 0, dsz, mtu, &pktchain);
        if (unlikely(rc < 0))
                return rc;
 
        do {
-               rc = tipc_bcast_xmit(net, pktchain);
+               rc = tipc_bcast_xmit(net, &pktchain);
                if (likely(!rc))
                        return dsz;
 
@@ -704,7 +706,7 @@ new_mtu:
                        if (!rc)
                                continue;
                }
-               __skb_queue_purge(pktchain);
+               __skb_queue_purge(&pktchain);
                if (rc == -EMSGSIZE) {
                        msg->msg_iter = save;
                        goto new_mtu;
@@ -863,7 +865,7 @@ static int __tipc_sendmsg(struct socket *sock, struct msghdr *m, size_t dsz)
        struct net *net = sock_net(sk);
        struct tipc_msg *mhdr = &tsk->phdr;
        u32 dnode, dport;
-       struct sk_buff_head *pktchain = &sk->sk_write_queue;
+       struct sk_buff_head pktchain;
        struct sk_buff *skb;
        struct tipc_name_seq *seq;
        struct iov_iter save;
@@ -924,17 +926,18 @@ static int __tipc_sendmsg(struct socket *sock, struct msghdr *m, size_t dsz)
                msg_set_hdr_sz(mhdr, BASIC_H_SIZE);
        }
 
+       skb_queue_head_init(&pktchain);
        save = m->msg_iter;
 new_mtu:
        mtu = tipc_node_get_mtu(net, dnode, tsk->portid);
-       rc = tipc_msg_build(mhdr, m, 0, dsz, mtu, pktchain);
+       rc = tipc_msg_build(mhdr, m, 0, dsz, mtu, &pktchain);
        if (rc < 0)
                return rc;
 
        do {
-               skb = skb_peek(pktchain);
+               skb = skb_peek(&pktchain);
                TIPC_SKB_CB(skb)->wakeup_pending = tsk->link_cong;
-               rc = tipc_node_xmit(net, pktchain, dnode, tsk->portid);
+               rc = tipc_node_xmit(net, &pktchain, dnode, tsk->portid);
                if (likely(!rc)) {
                        if (sock->state != SS_READY)
                                sock->state = SS_CONNECTING;
@@ -946,7 +949,7 @@ new_mtu:
                        if (!rc)
                                continue;
                }
-               __skb_queue_purge(pktchain);
+               __skb_queue_purge(&pktchain);
                if (rc == -EMSGSIZE) {
                        m->msg_iter = save;
                        goto new_mtu;
@@ -1016,7 +1019,7 @@ static int __tipc_send_stream(struct socket *sock, struct msghdr *m, size_t dsz)
        struct net *net = sock_net(sk);
        struct tipc_sock *tsk = tipc_sk(sk);
        struct tipc_msg *mhdr = &tsk->phdr;
-       struct sk_buff_head *pktchain = &sk->sk_write_queue;
+       struct sk_buff_head pktchain;
        DECLARE_SOCKADDR(struct sockaddr_tipc *, dest, m->msg_name);
        u32 portid = tsk->portid;
        int rc = -EINVAL;
@@ -1044,17 +1047,19 @@ static int __tipc_send_stream(struct socket *sock, struct msghdr *m, size_t dsz)
 
        timeo = sock_sndtimeo(sk, m->msg_flags & MSG_DONTWAIT);
        dnode = tsk_peer_node(tsk);
+       skb_queue_head_init(&pktchain);
 
 next:
        save = m->msg_iter;
        mtu = tsk->max_pkt;
        send = min_t(uint, dsz - sent, TIPC_MAX_USER_MSG_SIZE);
-       rc = tipc_msg_build(mhdr, m, sent, send, mtu, pktchain);
+       rc = tipc_msg_build(mhdr, m, sent, send, mtu, &pktchain);
        if (unlikely(rc < 0))
                return rc;
+
        do {
                if (likely(!tsk_conn_cong(tsk))) {
-                       rc = tipc_node_xmit(net, pktchain, dnode, portid);
+                       rc = tipc_node_xmit(net, &pktchain, dnode, portid);
                        if (likely(!rc)) {
                                tsk->sent_unacked++;
                                sent += send;
@@ -1063,7 +1068,7 @@ next:
                                goto next;
                        }
                        if (rc == -EMSGSIZE) {
-                               __skb_queue_purge(pktchain);
+                               __skb_queue_purge(&pktchain);
                                tsk->max_pkt = tipc_node_get_mtu(net, dnode,
                                                                 portid);
                                m->msg_iter = save;
@@ -1077,7 +1082,7 @@ next:
                rc = tipc_wait_for_sndpkt(sock, &timeo);
        } while (!rc);
 
-       __skb_queue_purge(pktchain);
+       __skb_queue_purge(&pktchain);
        return sent ? sent : rc;
 }
 
index 69ee2eeef968851192035cb166410f1f37e7722f..f9ff73a8d8154ff0a52f8c33a64077f5d8c57ae5 100644 (file)
@@ -296,7 +296,8 @@ static void tipc_subscrb_rcv_cb(struct net *net, int conid,
        if (tipc_subscrp_create(net, (struct tipc_subscr *)buf, subscrb, &sub))
                return tipc_conn_terminate(tn->topsrv, subscrb->conid);
 
-       tipc_nametbl_subscribe(sub);
+       if (sub)
+               tipc_nametbl_subscribe(sub);
 }
 
 /* Handle one request to establish a new subscriber */
index b0915515640efed1ff4795103c0572aba48c38f4..8f0bac7e03c406466e0b5ca2b01b3ca2574ab79f 100644 (file)
@@ -1147,6 +1147,8 @@ static int cfg80211_netdev_notifier_call(struct notifier_block *nb,
                return NOTIFY_DONE;
        }
 
+       wireless_nlevent_flush();
+
        return NOTIFY_OK;
 }
 
index d4786f2802aa3c94f0a9bfcba846c98ec5b2b249..711cb7ad6ae011132b868d928ebe23037040db82 100644 (file)
@@ -7547,7 +7547,7 @@ static int nl80211_join_ibss(struct sk_buff *skb, struct genl_info *info)
 
                if ((ibss.chandef.width != NL80211_CHAN_WIDTH_20_NOHT) &&
                    no_ht) {
-                       kfree(connkeys);
+                       kzfree(connkeys);
                        return -EINVAL;
                }
        }
index 8020b5b094d4c8fba0c0f2431f8af7d8ecc40dca..d49ed7666d4cb3e5a20641ebc0acfd3ccda80ff9 100644 (file)
@@ -917,6 +917,12 @@ void __cfg80211_disconnected(struct net_device *dev, const u8 *ie,
 
        nl80211_send_disconnected(rdev, dev, reason, ie, ie_len, from_ap);
 
+       /* stop critical protocol if supported */
+       if (rdev->ops->crit_proto_stop && rdev->crit_proto_nlportid) {
+               rdev->crit_proto_nlportid = 0;
+               rdev_crit_proto_stop(rdev, wdev);
+       }
+
        /*
         * Delete all the keys ... pairwise keys can't really
         * exist any more anyway, but default keys might.
index c8717c1d082e702f9b071c480e873b408b400daf..b50ee5d622e14d4fb486ce0c533757e958702f54 100644 (file)
@@ -342,6 +342,40 @@ static const int compat_event_type_size[] = {
 
 /* IW event code */
 
+void wireless_nlevent_flush(void)
+{
+       struct sk_buff *skb;
+       struct net *net;
+
+       ASSERT_RTNL();
+
+       for_each_net(net) {
+               while ((skb = skb_dequeue(&net->wext_nlevents)))
+                       rtnl_notify(skb, net, 0, RTNLGRP_LINK, NULL,
+                                   GFP_KERNEL);
+       }
+}
+EXPORT_SYMBOL_GPL(wireless_nlevent_flush);
+
+static int wext_netdev_notifier_call(struct notifier_block *nb,
+                                    unsigned long state, void *ptr)
+{
+       /*
+        * When a netdev changes state in any way, flush all pending messages
+        * to avoid them going out in a strange order, e.g. RTM_NEWLINK after
+        * RTM_DELLINK, or with IFF_UP after without IFF_UP during dev_close()
+        * or similar - all of which could otherwise happen due to delays from
+        * schedule_work().
+        */
+       wireless_nlevent_flush();
+
+       return NOTIFY_OK;
+}
+
+static struct notifier_block wext_netdev_notifier = {
+       .notifier_call = wext_netdev_notifier_call,
+};
+
 static int __net_init wext_pernet_init(struct net *net)
 {
        skb_queue_head_init(&net->wext_nlevents);
@@ -360,7 +394,12 @@ static struct pernet_operations wext_pernet_ops = {
 
 static int __init wireless_nlevent_init(void)
 {
-       return register_pernet_subsys(&wext_pernet_ops);
+       int err = register_pernet_subsys(&wext_pernet_ops);
+
+       if (err)
+               return err;
+
+       return register_netdevice_notifier(&wext_netdev_notifier);
 }
 
 subsys_initcall(wireless_nlevent_init);
@@ -368,17 +407,8 @@ subsys_initcall(wireless_nlevent_init);
 /* Process events generated by the wireless layer or the driver. */
 static void wireless_nlevent_process(struct work_struct *work)
 {
-       struct sk_buff *skb;
-       struct net *net;
-
        rtnl_lock();
-
-       for_each_net(net) {
-               while ((skb = skb_dequeue(&net->wext_nlevents)))
-                       rtnl_notify(skb, net, 0, RTNLGRP_LINK, NULL,
-                                   GFP_KERNEL);
-       }
-
+       wireless_nlevent_flush();
        rtnl_unlock();
 }
 
index d154f0877fd806a93d234e6d4288da27d6c5d09f..7bfe9fa1c8dc6db8d6c4415fbf0d26e66477e42e 100755 (executable)
@@ -1,7 +1,7 @@
 #!/usr/bin/awk -f
 # extract linker version number from stdin and turn into single number
        {
-       gsub(".*)", "");
+       gsub(".*\\)", "");
        gsub(".*version ", "");
        gsub("-.*", "");
        split($1,a, ".");
index affb192238a403b88dc38f3e76e0c54440779ea0..faae6936bae4f55851e4f0a7062f03fdebf674ee 100644 (file)
@@ -1130,7 +1130,7 @@ static int sid_status_control_get(struct snd_kcontrol *kcontrol,
        struct ab8500_codec_drvdata *drvdata = dev_get_drvdata(codec->dev);
 
        mutex_lock(&drvdata->ctrl_lock);
-       ucontrol->value.integer.value[0] = drvdata->sid_status;
+       ucontrol->value.enumerated.item[0] = drvdata->sid_status;
        mutex_unlock(&drvdata->ctrl_lock);
 
        return 0;
@@ -1147,7 +1147,7 @@ static int sid_status_control_put(struct snd_kcontrol *kcontrol,
 
        dev_dbg(codec->dev, "%s: Enter\n", __func__);
 
-       if (ucontrol->value.integer.value[0] != SID_APPLY_FIR) {
+       if (ucontrol->value.enumerated.item[0] != SID_APPLY_FIR) {
                dev_err(codec->dev,
                        "%s: ERROR: This control supports '%s' only!\n",
                        __func__, enum_sid_state[SID_APPLY_FIR]);
@@ -1199,7 +1199,7 @@ static int anc_status_control_get(struct snd_kcontrol *kcontrol,
        struct ab8500_codec_drvdata *drvdata = dev_get_drvdata(codec->dev);
 
        mutex_lock(&drvdata->ctrl_lock);
-       ucontrol->value.integer.value[0] = drvdata->anc_status;
+       ucontrol->value.enumerated.item[0] = drvdata->anc_status;
        mutex_unlock(&drvdata->ctrl_lock);
 
        return 0;
@@ -1220,7 +1220,7 @@ static int anc_status_control_put(struct snd_kcontrol *kcontrol,
 
        mutex_lock(&drvdata->ctrl_lock);
 
-       req = ucontrol->value.integer.value[0];
+       req = ucontrol->value.enumerated.item[0];
        if (req >= ARRAY_SIZE(enum_anc_state)) {
                status = -EINVAL;
                goto cleanup;
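This and the remaining codec hunks below all apply the same rule: enum controls exchange values through value.enumerated.item[], which is unsigned, so only an upper-bound check is needed. A generic sketch (struct demo_priv, its mode field and the texts array are assumptions):

static const char * const demo_mode_texts[] = { "Off", "On" };

static int demo_mode_get(struct snd_kcontrol *kcontrol,
			 struct snd_ctl_elem_value *ucontrol)
{
	struct snd_soc_codec *codec = snd_soc_kcontrol_codec(kcontrol);
	struct demo_priv *priv = snd_soc_codec_get_drvdata(codec);

	ucontrol->value.enumerated.item[0] = priv->mode;
	return 0;
}

static int demo_mode_put(struct snd_kcontrol *kcontrol,
			 struct snd_ctl_elem_value *ucontrol)
{
	struct snd_soc_codec *codec = snd_soc_kcontrol_codec(kcontrol);
	struct demo_priv *priv = snd_soc_codec_get_drvdata(codec);
	unsigned int sel = ucontrol->value.enumerated.item[0];

	if (sel >= ARRAY_SIZE(demo_mode_texts))
		return -EINVAL;

	priv->mode = sel;
	return 0;
}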
index e13583e6ff56aa5d69a163ca7a447acdaa18cbd6..5ae87a084d97e24a6aa243ba6cb1afc45290ac43 100644 (file)
@@ -103,9 +103,9 @@ bool adau17x1_has_dsp(struct adau *adau);
 #define ADAU17X1_CLOCK_CONTROL_CORECLK_SRC_PLL BIT(3)
 #define ADAU17X1_CLOCK_CONTROL_SYSCLK_EN       BIT(0)
 
-#define ADAU17X1_SERIAL_PORT1_BCLK32           (0x0 << 5)
-#define ADAU17X1_SERIAL_PORT1_BCLK48           (0x1 << 5)
-#define ADAU17X1_SERIAL_PORT1_BCLK64           (0x2 << 5)
+#define ADAU17X1_SERIAL_PORT1_BCLK64           (0x0 << 5)
+#define ADAU17X1_SERIAL_PORT1_BCLK32           (0x1 << 5)
+#define ADAU17X1_SERIAL_PORT1_BCLK48           (0x2 << 5)
 #define ADAU17X1_SERIAL_PORT1_BCLK128          (0x3 << 5)
 #define ADAU17X1_SERIAL_PORT1_BCLK256          (0x4 << 5)
 #define ADAU17X1_SERIAL_PORT1_BCLK_MASK                (0x7 << 5)
index b3951524339f90cdf8f90dbc23cbadd46a874af9..35488f14e2378ada271328a97504a9558b746836 100644 (file)
@@ -60,15 +60,15 @@ static int cs42l51_get_chan_mix(struct snd_kcontrol *kcontrol,
        switch (value) {
        default:
        case 0:
-               ucontrol->value.integer.value[0] = 0;
+               ucontrol->value.enumerated.item[0] = 0;
                break;
        /* same value : (L+R)/2 and (R+L)/2 */
        case 1:
        case 2:
-               ucontrol->value.integer.value[0] = 1;
+               ucontrol->value.enumerated.item[0] = 1;
                break;
        case 3:
-               ucontrol->value.integer.value[0] = 2;
+               ucontrol->value.enumerated.item[0] = 2;
                break;
        }
 
@@ -85,7 +85,7 @@ static int cs42l51_set_chan_mix(struct snd_kcontrol *kcontrol,
        struct snd_soc_codec *codec = snd_soc_kcontrol_codec(kcontrol);
        unsigned char val;
 
-       switch (ucontrol->value.integer.value[0]) {
+       switch (ucontrol->value.enumerated.item[0]) {
        default:
        case 0:
                val = CHAN_MIX_NORMAL;
index 1d5a89c5164b85686f652dc8f5e13af6c1eb49e6..461506a4ca6a2ddb790b1646f27bce2d7abf25ba 100644 (file)
@@ -334,7 +334,7 @@ static int da732x_hpf_set(struct snd_kcontrol *kcontrol,
        struct snd_soc_codec *codec = snd_soc_kcontrol_codec(kcontrol);
        struct soc_enum *enum_ctrl = (struct soc_enum *)kcontrol->private_value;
        unsigned int reg = enum_ctrl->reg;
-       unsigned int sel = ucontrol->value.integer.value[0];
+       unsigned int sel = ucontrol->value.enumerated.item[0];
        unsigned int bits;
 
        switch (sel) {
@@ -368,13 +368,13 @@ static int da732x_hpf_get(struct snd_kcontrol *kcontrol,
 
        switch (val) {
        case DA732X_HPF_VOICE_EN:
-               ucontrol->value.integer.value[0] = DA732X_HPF_VOICE;
+               ucontrol->value.enumerated.item[0] = DA732X_HPF_VOICE;
                break;
        case DA732X_HPF_MUSIC_EN:
-               ucontrol->value.integer.value[0] = DA732X_HPF_MUSIC;
+               ucontrol->value.enumerated.item[0] = DA732X_HPF_MUSIC;
                break;
        default:
-               ucontrol->value.integer.value[0] = DA732X_HPF_DISABLED;
+               ucontrol->value.enumerated.item[0] = DA732X_HPF_DISABLED;
                break;
        }
 
index 20dcc496d39c9d8ac01e4d49004f92823fdd37a6..fc22804cabc5942acca6921c034fa98f234041dc 100644 (file)
@@ -1496,7 +1496,7 @@ static int max98088_put_eq_enum(struct snd_kcontrol *kcontrol,
        struct max98088_pdata *pdata = max98088->pdata;
        int channel = max98088_get_channel(codec, kcontrol->id.name);
        struct max98088_cdata *cdata;
-       int sel = ucontrol->value.integer.value[0];
+       int sel = ucontrol->value.enumerated.item[0];
 
        if (channel < 0)
               return channel;
index 1fedac50355e954790a15b84ed6aab49b2bb98e3..3577003f39cf8feaba0d97bb313e277b3b763ce7 100644 (file)
@@ -1499,7 +1499,7 @@ static int max98095_put_eq_enum(struct snd_kcontrol *kcontrol,
        struct max98095_pdata *pdata = max98095->pdata;
        int channel = max98095_get_eq_channel(kcontrol->id.name);
        struct max98095_cdata *cdata;
-       unsigned int sel = ucontrol->value.integer.value[0];
+       unsigned int sel = ucontrol->value.enumerated.item[0];
        struct max98095_eq_cfg *coef_set;
        int fs, best, best_val, i;
        int regmask, regsave;
@@ -1653,7 +1653,7 @@ static int max98095_put_bq_enum(struct snd_kcontrol *kcontrol,
        struct max98095_pdata *pdata = max98095->pdata;
        int channel = max98095_get_bq_channel(codec, kcontrol->id.name);
        struct max98095_cdata *cdata;
-       unsigned int sel = ucontrol->value.integer.value[0];
+       unsigned int sel = ucontrol->value.enumerated.item[0];
        struct max98095_biquad_cfg *coef_set;
        int fs, best, best_val, i;
        int regmask, regsave;
index 781398fb2841565f20d2f7e7a4f893d840e3e7ae..f7a6ce7e5fb12b7ab183428b28d6c69409676d08 100644 (file)
@@ -446,7 +446,7 @@ static int dac33_get_fifo_mode(struct snd_kcontrol *kcontrol,
        struct snd_soc_codec *codec = snd_soc_kcontrol_codec(kcontrol);
        struct tlv320dac33_priv *dac33 = snd_soc_codec_get_drvdata(codec);
 
-       ucontrol->value.integer.value[0] = dac33->fifo_mode;
+       ucontrol->value.enumerated.item[0] = dac33->fifo_mode;
 
        return 0;
 }
@@ -458,17 +458,16 @@ static int dac33_set_fifo_mode(struct snd_kcontrol *kcontrol,
        struct tlv320dac33_priv *dac33 = snd_soc_codec_get_drvdata(codec);
        int ret = 0;
 
-       if (dac33->fifo_mode == ucontrol->value.integer.value[0])
+       if (dac33->fifo_mode == ucontrol->value.enumerated.item[0])
                return 0;
        /* Do not allow changes while stream is running*/
        if (snd_soc_codec_is_active(codec))
                return -EPERM;
 
-       if (ucontrol->value.integer.value[0] < 0 ||
-           ucontrol->value.integer.value[0] >= DAC33_FIFO_LAST_MODE)
+       if (ucontrol->value.enumerated.item[0] >= DAC33_FIFO_LAST_MODE)
                ret = -EINVAL;
        else
-               dac33->fifo_mode = ucontrol->value.integer.value[0];
+               dac33->fifo_mode = ucontrol->value.enumerated.item[0];
 
        return ret;
 }
index 7693c1129babf0e42c58b5ca93832052374cb426..1b79778098d29ffbb377efc24576825cdfe8038c 100644 (file)
@@ -175,7 +175,7 @@ static int snd_wl1273_get_audio_route(struct snd_kcontrol *kcontrol,
        struct snd_soc_codec *codec = snd_soc_kcontrol_codec(kcontrol);
        struct wl1273_priv *wl1273 = snd_soc_codec_get_drvdata(codec);
 
-       ucontrol->value.integer.value[0] = wl1273->mode;
+       ucontrol->value.enumerated.item[0] = wl1273->mode;
 
        return 0;
 }
@@ -193,18 +193,17 @@ static int snd_wl1273_set_audio_route(struct snd_kcontrol *kcontrol,
        struct snd_soc_codec *codec = snd_soc_kcontrol_codec(kcontrol);
        struct wl1273_priv *wl1273 = snd_soc_codec_get_drvdata(codec);
 
-       if (wl1273->mode == ucontrol->value.integer.value[0])
+       if (wl1273->mode == ucontrol->value.enumerated.item[0])
                return 0;
 
        /* Do not allow changes while stream is running */
        if (snd_soc_codec_is_active(codec))
                return -EPERM;
 
-       if (ucontrol->value.integer.value[0] < 0 ||
-           ucontrol->value.integer.value[0] >=  ARRAY_SIZE(wl1273_audio_route))
+       if (ucontrol->value.enumerated.item[0] >=  ARRAY_SIZE(wl1273_audio_route))
                return -EINVAL;
 
-       wl1273->mode = ucontrol->value.integer.value[0];
+       wl1273->mode = ucontrol->value.enumerated.item[0];
 
        return 1;
 }
@@ -219,7 +218,7 @@ static int snd_wl1273_fm_audio_get(struct snd_kcontrol *kcontrol,
 
        dev_dbg(codec->dev, "%s: enter.\n", __func__);
 
-       ucontrol->value.integer.value[0] = wl1273->core->audio_mode;
+       ucontrol->value.enumerated.item[0] = wl1273->core->audio_mode;
 
        return 0;
 }
@@ -233,7 +232,7 @@ static int snd_wl1273_fm_audio_put(struct snd_kcontrol *kcontrol,
 
        dev_dbg(codec->dev, "%s: enter.\n", __func__);
 
-       val = ucontrol->value.integer.value[0];
+       val = ucontrol->value.enumerated.item[0];
        if (wl1273->core->audio_mode == val)
                return 0;
 
index 61299ca372ffc1fc3f83dd88fe823151da7b09b8..6f1024f48b193bc7d3127cc8b12d56d14124413b 100644 (file)
@@ -233,7 +233,7 @@ static int wm8753_get_dai(struct snd_kcontrol *kcontrol,
        struct snd_soc_codec *codec = snd_soc_kcontrol_codec(kcontrol);
        struct wm8753_priv *wm8753 = snd_soc_codec_get_drvdata(codec);
 
-       ucontrol->value.integer.value[0] = wm8753->dai_func;
+       ucontrol->value.enumerated.item[0] = wm8753->dai_func;
        return 0;
 }
 
@@ -244,7 +244,7 @@ static int wm8753_set_dai(struct snd_kcontrol *kcontrol,
        struct wm8753_priv *wm8753 = snd_soc_codec_get_drvdata(codec);
        u16 ioctl;
 
-       if (wm8753->dai_func == ucontrol->value.integer.value[0])
+       if (wm8753->dai_func == ucontrol->value.enumerated.item[0])
                return 0;
 
        if (snd_soc_codec_is_active(codec))
@@ -252,7 +252,7 @@ static int wm8753_set_dai(struct snd_kcontrol *kcontrol,
 
        ioctl = snd_soc_read(codec, WM8753_IOCTL);
 
-       wm8753->dai_func = ucontrol->value.integer.value[0];
+       wm8753->dai_func = ucontrol->value.enumerated.item[0];
 
        if (((ioctl >> 2) & 0x3) == wm8753->dai_func)
                return 1;
index 8172e499e6ed18f33e6f94b2bec1a1647d1c1788..edd7a77091942cd7b873e9c193baed54dd6186c6 100644 (file)
@@ -396,7 +396,7 @@ static int wm8904_put_drc_enum(struct snd_kcontrol *kcontrol,
        struct snd_soc_codec *codec = snd_soc_kcontrol_codec(kcontrol);
        struct wm8904_priv *wm8904 = snd_soc_codec_get_drvdata(codec);
        struct wm8904_pdata *pdata = wm8904->pdata;
-       int value = ucontrol->value.integer.value[0];
+       int value = ucontrol->value.enumerated.item[0];
 
        if (value >= pdata->num_drc_cfgs)
                return -EINVAL;
@@ -467,7 +467,7 @@ static int wm8904_put_retune_mobile_enum(struct snd_kcontrol *kcontrol,
        struct snd_soc_codec *codec = snd_soc_kcontrol_codec(kcontrol);
        struct wm8904_priv *wm8904 = snd_soc_codec_get_drvdata(codec);
        struct wm8904_pdata *pdata = wm8904->pdata;
-       int value = ucontrol->value.integer.value[0];
+       int value = ucontrol->value.enumerated.item[0];
 
        if (value >= pdata->num_retune_mobile_cfgs)
                return -EINVAL;
index c799cca5abeb704645687b863ccedfc6749e9ac2..6b864c0fc2b676cc83f9d03d2218815e7e805919 100644 (file)
@@ -459,7 +459,7 @@ static int wm8958_put_mbc_enum(struct snd_kcontrol *kcontrol,
        struct snd_soc_codec *codec = snd_soc_kcontrol_codec(kcontrol);
        struct wm8994_priv *wm8994 = snd_soc_codec_get_drvdata(codec);
        struct wm8994 *control = wm8994->wm8994;
-       int value = ucontrol->value.integer.value[0];
+       int value = ucontrol->value.enumerated.item[0];
        int reg;
 
        /* Don't allow on the fly reconfiguration */
@@ -549,7 +549,7 @@ static int wm8958_put_vss_enum(struct snd_kcontrol *kcontrol,
        struct snd_soc_codec *codec = snd_soc_kcontrol_codec(kcontrol);
        struct wm8994_priv *wm8994 = snd_soc_codec_get_drvdata(codec);
        struct wm8994 *control = wm8994->wm8994;
-       int value = ucontrol->value.integer.value[0];
+       int value = ucontrol->value.enumerated.item[0];
        int reg;
 
        /* Don't allow on the fly reconfiguration */
@@ -582,7 +582,7 @@ static int wm8958_put_vss_hpf_enum(struct snd_kcontrol *kcontrol,
        struct snd_soc_codec *codec = snd_soc_kcontrol_codec(kcontrol);
        struct wm8994_priv *wm8994 = snd_soc_codec_get_drvdata(codec);
        struct wm8994 *control = wm8994->wm8994;
-       int value = ucontrol->value.integer.value[0];
+       int value = ucontrol->value.enumerated.item[0];
        int reg;
 
        /* Don't allow on the fly reconfiguration */
@@ -749,7 +749,7 @@ static int wm8958_put_enh_eq_enum(struct snd_kcontrol *kcontrol,
        struct snd_soc_codec *codec = snd_soc_kcontrol_codec(kcontrol);
        struct wm8994_priv *wm8994 = snd_soc_codec_get_drvdata(codec);
        struct wm8994 *control = wm8994->wm8994;
-       int value = ucontrol->value.integer.value[0];
+       int value = ucontrol->value.enumerated.item[0];
        int reg;
 
        /* Don't allow on the fly reconfiguration */
index 7350ff654bbf37fe660e2b7a75cc5a15b5d63c68..0c002a5712cb4f261cd84454f38e24a89836519a 100644 (file)
@@ -497,9 +497,9 @@ static int eqmode_get(struct snd_kcontrol *kcontrol,
 
        reg = snd_soc_read(codec, WM8983_EQ1_LOW_SHELF);
        if (reg & WM8983_EQ3DMODE)
-               ucontrol->value.integer.value[0] = 1;
+               ucontrol->value.enumerated.item[0] = 1;
        else
-               ucontrol->value.integer.value[0] = 0;
+               ucontrol->value.enumerated.item[0] = 0;
 
        return 0;
 }
@@ -511,18 +511,18 @@ static int eqmode_put(struct snd_kcontrol *kcontrol,
        unsigned int regpwr2, regpwr3;
        unsigned int reg_eq;
 
-       if (ucontrol->value.integer.value[0] != 0
-           && ucontrol->value.integer.value[0] != 1)
+       if (ucontrol->value.enumerated.item[0] != 0
+           && ucontrol->value.enumerated.item[0] != 1)
                return -EINVAL;
 
        reg_eq = snd_soc_read(codec, WM8983_EQ1_LOW_SHELF);
        switch ((reg_eq & WM8983_EQ3DMODE) >> WM8983_EQ3DMODE_SHIFT) {
        case 0:
-               if (!ucontrol->value.integer.value[0])
+               if (!ucontrol->value.enumerated.item[0])
                        return 0;
                break;
        case 1:
-               if (ucontrol->value.integer.value[0])
+               if (ucontrol->value.enumerated.item[0])
                        return 0;
                break;
        }
@@ -537,7 +537,7 @@ static int eqmode_put(struct snd_kcontrol *kcontrol,
        /* set the desired eqmode */
        snd_soc_update_bits(codec, WM8983_EQ1_LOW_SHELF,
                            WM8983_EQ3DMODE_MASK,
-                           ucontrol->value.integer.value[0]
+                           ucontrol->value.enumerated.item[0]
                            << WM8983_EQ3DMODE_SHIFT);
        /* restore DAC/ADC configuration */
        snd_soc_write(codec, WM8983_POWER_MANAGEMENT_2, regpwr2);
index 9918152a03c7518e5c0c2e299a35145abd808a14..6ac76fe116b028db34c8cdbf3691babeb2a68844 100644 (file)
@@ -531,9 +531,9 @@ static int eqmode_get(struct snd_kcontrol *kcontrol,
 
        reg = snd_soc_read(codec, WM8985_EQ1_LOW_SHELF);
        if (reg & WM8985_EQ3DMODE)
-               ucontrol->value.integer.value[0] = 1;
+               ucontrol->value.enumerated.item[0] = 1;
        else
-               ucontrol->value.integer.value[0] = 0;
+               ucontrol->value.enumerated.item[0] = 0;
 
        return 0;
 }
@@ -545,18 +545,18 @@ static int eqmode_put(struct snd_kcontrol *kcontrol,
        unsigned int regpwr2, regpwr3;
        unsigned int reg_eq;
 
-       if (ucontrol->value.integer.value[0] != 0
-                       && ucontrol->value.integer.value[0] != 1)
+       if (ucontrol->value.enumerated.item[0] != 0
+                       && ucontrol->value.enumerated.item[0] != 1)
                return -EINVAL;
 
        reg_eq = snd_soc_read(codec, WM8985_EQ1_LOW_SHELF);
        switch ((reg_eq & WM8985_EQ3DMODE) >> WM8985_EQ3DMODE_SHIFT) {
        case 0:
-               if (!ucontrol->value.integer.value[0])
+               if (!ucontrol->value.enumerated.item[0])
                        return 0;
                break;
        case 1:
-               if (ucontrol->value.integer.value[0])
+               if (ucontrol->value.enumerated.item[0])
                        return 0;
                break;
        }
@@ -573,7 +573,7 @@ static int eqmode_put(struct snd_kcontrol *kcontrol,
        /* set the desired eqmode */
        snd_soc_update_bits(codec, WM8985_EQ1_LOW_SHELF,
                            WM8985_EQ3DMODE_MASK,
-                           ucontrol->value.integer.value[0]
+                           ucontrol->value.enumerated.item[0]
                            << WM8985_EQ3DMODE_SHIFT);
        /* restore DAC/ADC configuration */
        snd_soc_write(codec, WM8985_POWER_MANAGEMENT_2, regpwr2);
index 2ccbb322df775b803d40fe765958f439d855d41f..a18aecb4993590e6a7bd936b238fee544462a6bc 100644 (file)
@@ -362,7 +362,7 @@ static int wm8994_put_drc_enum(struct snd_kcontrol *kcontrol,
        struct wm8994 *control = wm8994->wm8994;
        struct wm8994_pdata *pdata = &control->pdata;
        int drc = wm8994_get_drc(kcontrol->id.name);
-       int value = ucontrol->value.integer.value[0];
+       int value = ucontrol->value.enumerated.item[0];
 
        if (drc < 0)
                return drc;
@@ -469,7 +469,7 @@ static int wm8994_put_retune_mobile_enum(struct snd_kcontrol *kcontrol,
        struct wm8994 *control = wm8994->wm8994;
        struct wm8994_pdata *pdata = &control->pdata;
        int block = wm8994_get_retune_mobile_block(kcontrol->id.name);
-       int value = ucontrol->value.integer.value[0];
+       int value = ucontrol->value.enumerated.item[0];
 
        if (block < 0)
                return block;
index 8d7d6c01a2f7264905e7a8e29888a417f44fb515..f99b34f7647b092a657fbbc3a70fd01219837351 100644 (file)
@@ -416,7 +416,7 @@ static int wm8996_put_retune_mobile_enum(struct snd_kcontrol *kcontrol,
        struct wm8996_priv *wm8996 = snd_soc_codec_get_drvdata(codec);
        struct wm8996_pdata *pdata = &wm8996->pdata;
        int block = wm8996_get_retune_mobile_block(kcontrol->id.name);
-       int value = ucontrol->value.integer.value[0];
+       int value = ucontrol->value.enumerated.item[0];
 
        if (block < 0)
                return block;
index ccb3b15139ad9dacd41ad801201471d1ed696997..363b3b6676163e8c0b110efd165c5eccd7ec9e1a 100644 (file)
@@ -344,9 +344,9 @@ static int speaker_mode_get(struct snd_kcontrol *kcontrol,
 
        reg = snd_soc_read(codec, WM9081_ANALOGUE_SPEAKER_2);
        if (reg & WM9081_SPK_MODE)
-               ucontrol->value.integer.value[0] = 1;
+               ucontrol->value.enumerated.item[0] = 1;
        else
-               ucontrol->value.integer.value[0] = 0;
+               ucontrol->value.enumerated.item[0] = 0;
 
        return 0;
 }
@@ -365,7 +365,7 @@ static int speaker_mode_put(struct snd_kcontrol *kcontrol,
        unsigned int reg2 = snd_soc_read(codec, WM9081_ANALOGUE_SPEAKER_2);
 
        /* Are we changing anything? */
-       if (ucontrol->value.integer.value[0] ==
+       if (ucontrol->value.enumerated.item[0] ==
            ((reg2 & WM9081_SPK_MODE) != 0))
                return 0;
 
@@ -373,7 +373,7 @@ static int speaker_mode_put(struct snd_kcontrol *kcontrol,
        if (reg_pwr & WM9081_SPK_ENA)
                return -EINVAL;
 
-       if (ucontrol->value.integer.value[0]) {
+       if (ucontrol->value.enumerated.item[0]) {
                /* Class AB */
                reg2 &= ~(WM9081_SPK_INV_MUTE | WM9081_OUT_SPK_CTRL);
                reg2 |= WM9081_SPK_MODE;
index 79e143625ac3d9c14ca72f96dc11acb6a970efdb..9849643ef8099c7f3340a719cf160c8ce633f9c5 100644 (file)
@@ -1212,7 +1212,7 @@ static int wm9713_soc_probe(struct snd_soc_codec *codec)
        if (IS_ERR(wm9713->ac97))
                return PTR_ERR(wm9713->ac97);
 
-       regmap = devm_regmap_init_ac97(wm9713->ac97, &wm9713_regmap_config);
+       regmap = regmap_init_ac97(wm9713->ac97, &wm9713_regmap_config);
        if (IS_ERR(regmap)) {
                snd_soc_free_ac97_codec(wm9713->ac97);
                return PTR_ERR(regmap);
index 33806d487b8ae00711dddd4ec400393dbff59b1f..b9195b9c2b05175278d4e779f7db87aad20d9f63 100644 (file)
@@ -586,7 +586,7 @@ static int wm_adsp_fw_get(struct snd_kcontrol *kcontrol,
        struct soc_enum *e = (struct soc_enum *)kcontrol->private_value;
        struct wm_adsp *dsp = snd_soc_codec_get_drvdata(codec);
 
-       ucontrol->value.integer.value[0] = dsp[e->shift_l].fw;
+       ucontrol->value.enumerated.item[0] = dsp[e->shift_l].fw;
 
        return 0;
 }
@@ -599,10 +599,10 @@ static int wm_adsp_fw_put(struct snd_kcontrol *kcontrol,
        struct wm_adsp *dsp = snd_soc_codec_get_drvdata(codec);
        int ret = 0;
 
-       if (ucontrol->value.integer.value[0] == dsp[e->shift_l].fw)
+       if (ucontrol->value.enumerated.item[0] == dsp[e->shift_l].fw)
                return 0;
 
-       if (ucontrol->value.integer.value[0] >= WM_ADSP_NUM_FW)
+       if (ucontrol->value.enumerated.item[0] >= WM_ADSP_NUM_FW)
                return -EINVAL;
 
        mutex_lock(&dsp[e->shift_l].pwr_lock);
@@ -610,7 +610,7 @@ static int wm_adsp_fw_put(struct snd_kcontrol *kcontrol,
        if (dsp[e->shift_l].running || dsp[e->shift_l].compr)
                ret = -EBUSY;
        else
-               dsp[e->shift_l].fw = ucontrol->value.integer.value[0];
+               dsp[e->shift_l].fw = ucontrol->value.enumerated.item[0];
 
        mutex_unlock(&dsp[e->shift_l].pwr_lock);
 
index ed8de1035cda159d0d186f2cded0fb7a97fbceb4..40dfd8a3648408a2cc76bb6e4c4c3872c8e28ac3 100644 (file)
@@ -112,6 +112,20 @@ struct fsl_ssi_rxtx_reg_val {
        struct fsl_ssi_reg_val tx;
 };
 
+static const struct reg_default fsl_ssi_reg_defaults[] = {
+       {CCSR_SSI_SCR,     0x00000000},
+       {CCSR_SSI_SIER,    0x00003003},
+       {CCSR_SSI_STCR,    0x00000200},
+       {CCSR_SSI_SRCR,    0x00000200},
+       {CCSR_SSI_STCCR,   0x00040000},
+       {CCSR_SSI_SRCCR,   0x00040000},
+       {CCSR_SSI_SACNT,   0x00000000},
+       {CCSR_SSI_STMSK,   0x00000000},
+       {CCSR_SSI_SRMSK,   0x00000000},
+       {CCSR_SSI_SACCEN,  0x00000000},
+       {CCSR_SSI_SACCDIS, 0x00000000},
+};
+
 static bool fsl_ssi_readable_reg(struct device *dev, unsigned int reg)
 {
        switch (reg) {
@@ -176,7 +190,8 @@ static const struct regmap_config fsl_ssi_regconfig = {
        .val_bits = 32,
        .reg_stride = 4,
        .val_format_endian = REGMAP_ENDIAN_NATIVE,
-       .num_reg_defaults_raw = CCSR_SSI_SACCDIS / sizeof(uint32_t) + 1,
+       .reg_defaults = fsl_ssi_reg_defaults,
+       .num_reg_defaults = ARRAY_SIZE(fsl_ssi_reg_defaults),
        .readable_reg = fsl_ssi_readable_reg,
        .volatile_reg = fsl_ssi_volatile_reg,
        .precious_reg = fsl_ssi_precious_reg,
@@ -186,7 +201,6 @@ static const struct regmap_config fsl_ssi_regconfig = {
 
 struct fsl_ssi_soc_data {
        bool imx;
-       bool imx21regs; /* imx21-class SSI - no SACC{ST,EN,DIS} regs */
        bool offline_config;
        u32 sisr_write_mask;
 };
@@ -289,7 +303,6 @@ static struct fsl_ssi_soc_data fsl_ssi_mpc8610 = {
 
 static struct fsl_ssi_soc_data fsl_ssi_imx21 = {
        .imx = true,
-       .imx21regs = true,
        .offline_config = true,
        .sisr_write_mask = 0,
 };
@@ -573,12 +586,8 @@ static void fsl_ssi_setup_ac97(struct fsl_ssi_private *ssi_private)
         */
        regmap_write(regs, CCSR_SSI_SACNT,
                        CCSR_SSI_SACNT_AC97EN | CCSR_SSI_SACNT_FV);
-
-       /* no SACC{ST,EN,DIS} regs on imx21-class SSI */
-       if (!ssi_private->soc->imx21regs) {
-               regmap_write(regs, CCSR_SSI_SACCDIS, 0xff);
-               regmap_write(regs, CCSR_SSI_SACCEN, 0x300);
-       }
+       regmap_write(regs, CCSR_SSI_SACCDIS, 0xff);
+       regmap_write(regs, CCSR_SSI_SACCEN, 0x300);
 
        /*
         * Enable SSI, Transmit and Receive. AC97 has to communicate with the
@@ -1388,7 +1397,6 @@ static int fsl_ssi_probe(struct platform_device *pdev)
        struct resource *res;
        void __iomem *iomem;
        char name[64];
-       struct regmap_config regconfig = fsl_ssi_regconfig;
 
        of_id = of_match_device(fsl_ssi_ids, &pdev->dev);
        if (!of_id || !of_id->data)
@@ -1436,25 +1444,15 @@ static int fsl_ssi_probe(struct platform_device *pdev)
                return PTR_ERR(iomem);
        ssi_private->ssi_phys = res->start;
 
-       if (ssi_private->soc->imx21regs) {
-               /*
-                * According to datasheet imx21-class SSI
-                * don't have SACC{ST,EN,DIS} regs.
-                */
-               regconfig.max_register = CCSR_SSI_SRMSK;
-               regconfig.num_reg_defaults_raw =
-                       CCSR_SSI_SRMSK / sizeof(uint32_t) + 1;
-       }
-
        ret = of_property_match_string(np, "clock-names", "ipg");
        if (ret < 0) {
                ssi_private->has_ipg_clk_name = false;
                ssi_private->regs = devm_regmap_init_mmio(&pdev->dev, iomem,
-                       &regconfig);
+                       &fsl_ssi_regconfig);
        } else {
                ssi_private->has_ipg_clk_name = true;
                ssi_private->regs = devm_regmap_init_mmio_clk(&pdev->dev,
-                       "ipg", iomem, &regconfig);
+                       "ipg", iomem, &fsl_ssi_regconfig);
        }
        if (IS_ERR(ssi_private->regs)) {
                dev_err(&pdev->dev, "Failed to init register map\n");
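
The fsl_ssi hunks above drop the per-variant raw register-count handling in favour of an explicit reg_defaults table handed to regmap. A minimal sketch of that regmap idiom; the register map, offsets, and reset values here are purely illustrative and not taken from the SSI block:

#include <linux/err.h>
#include <linux/kernel.h>
#include <linux/platform_device.h>
#include <linux/regmap.h>

/* hypothetical two-register MMIO block with known power-on values */
static const struct reg_default demo_reg_defaults[] = {
	{ 0x00, 0x00000000 },	/* control register */
	{ 0x04, 0x00003003 },	/* interrupt enable register */
};

static const struct regmap_config demo_regmap_config = {
	.reg_bits = 32,
	.val_bits = 32,
	.reg_stride = 4,
	.max_register = 0x04,
	.reg_defaults = demo_reg_defaults,
	.num_reg_defaults = ARRAY_SIZE(demo_reg_defaults),
	.cache_type = REGCACHE_FLAT,
};

static int demo_probe(struct platform_device *pdev, void __iomem *iomem)
{
	struct regmap *regs;

	/* the defaults seed the cache, so no raw register count is needed */
	regs = devm_regmap_init_mmio(&pdev->dev, iomem, &demo_regmap_config);
	return PTR_ERR_OR_ZERO(regs);
}
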
index 2d3afddb0a2e8c83fef54b36eed42d7ff6b69878..a7b96a9a4e0ecb57876413c848cfa91359d42a92 100644 (file)
@@ -367,8 +367,12 @@ static int snd_cht_mc_probe(struct platform_device *pdev)
        }
        card->dev = &pdev->dev;
        sprintf(codec_name, "i2c-%s:00", drv->acpi_card->codec_id);
+
        /* set correct codec name */
-       strcpy((char *)card->dai_link[2].codec_name, codec_name);
+       for (i = 0; i < ARRAY_SIZE(cht_dailink); i++)
+               if (!strcmp(card->dai_link[i].codec_name, "i2c-10EC5645:00"))
+                       card->dai_link[i].codec_name = kstrdup(codec_name, GFP_KERNEL);
+
        snd_soc_card_set_drvdata(card, drv);
        ret_val = devm_snd_soc_register_card(&pdev->dev, card);
        if (ret_val) {
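
The cht_bsw_rt5645 hunk above replaces an unconditional strcpy() into an existing codec name with a lookup over the dai_link table that installs a freshly allocated string. A minimal sketch of that lookup-and-replace pattern; the placeholder string and function name are hypothetical:

#include <linux/slab.h>
#include <linux/string.h>
#include <sound/soc.h>

/* swap a placeholder codec name for the one discovered at probe time */
static int demo_fixup_codec_name(struct snd_soc_card *card,
				 const char *placeholder,
				 const char *real_name)
{
	int i;

	for (i = 0; i < card->num_links; i++) {
		if (strcmp(card->dai_link[i].codec_name, placeholder))
			continue;
		card->dai_link[i].codec_name = kstrdup(real_name, GFP_KERNEL);
		if (!card->dai_link[i].codec_name)
			return -ENOMEM;
	}
	return 0;
}
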
index 49c09a0add797ff95101bee1923b62254e3928a0..34f46c72a0e27cff8cd37108d27b71029cd088c6 100644 (file)
@@ -94,7 +94,7 @@ static const struct soc_enum lo_enum =
 static int headset_get_switch(struct snd_kcontrol *kcontrol,
        struct snd_ctl_elem_value *ucontrol)
 {
-       ucontrol->value.integer.value[0] = hs_switch;
+       ucontrol->value.enumerated.item[0] = hs_switch;
        return 0;
 }
 
@@ -104,12 +104,12 @@ static int headset_set_switch(struct snd_kcontrol *kcontrol,
        struct snd_soc_card *card = snd_kcontrol_chip(kcontrol);
        struct snd_soc_dapm_context *dapm = &card->dapm;
 
-       if (ucontrol->value.integer.value[0] == hs_switch)
+       if (ucontrol->value.enumerated.item[0] == hs_switch)
                return 0;
 
        snd_soc_dapm_mutex_lock(dapm);
 
-       if (ucontrol->value.integer.value[0]) {
+       if (ucontrol->value.enumerated.item[0]) {
                pr_debug("hs_set HS path\n");
                snd_soc_dapm_enable_pin_unlocked(dapm, "Headphones");
                snd_soc_dapm_disable_pin_unlocked(dapm, "EPOUT");
@@ -123,7 +123,7 @@ static int headset_set_switch(struct snd_kcontrol *kcontrol,
 
        snd_soc_dapm_mutex_unlock(dapm);
 
-       hs_switch = ucontrol->value.integer.value[0];
+       hs_switch = ucontrol->value.enumerated.item[0];
 
        return 0;
 }
@@ -148,7 +148,7 @@ static void lo_enable_out_pins(struct snd_soc_dapm_context *dapm)
 static int lo_get_switch(struct snd_kcontrol *kcontrol,
        struct snd_ctl_elem_value *ucontrol)
 {
-       ucontrol->value.integer.value[0] = lo_dac;
+       ucontrol->value.enumerated.item[0] = lo_dac;
        return 0;
 }
 
@@ -158,7 +158,7 @@ static int lo_set_switch(struct snd_kcontrol *kcontrol,
        struct snd_soc_card *card = snd_kcontrol_chip(kcontrol);
        struct snd_soc_dapm_context *dapm = &card->dapm;
 
-       if (ucontrol->value.integer.value[0] == lo_dac)
+       if (ucontrol->value.enumerated.item[0] == lo_dac)
                return 0;
 
        snd_soc_dapm_mutex_lock(dapm);
@@ -168,7 +168,7 @@ static int lo_set_switch(struct snd_kcontrol *kcontrol,
         */
        lo_enable_out_pins(dapm);
 
-       switch (ucontrol->value.integer.value[0]) {
+       switch (ucontrol->value.enumerated.item[0]) {
        case 0:
                pr_debug("set vibra path\n");
                snd_soc_dapm_disable_pin_unlocked(dapm, "VIB1OUT");
@@ -202,7 +202,7 @@ static int lo_set_switch(struct snd_kcontrol *kcontrol,
 
        snd_soc_dapm_mutex_unlock(dapm);
 
-       lo_dac = ucontrol->value.integer.value[0];
+       lo_dac = ucontrol->value.enumerated.item[0];
        return 0;
 }
 
index a294fee431f07363f965a81b4c9ef42eb3a42f58..5a4837dcfce3e615eedf47049ac3923332c8a9bf 100644 (file)
@@ -978,7 +978,7 @@ static int skl_tplg_tlv_control_set(struct snd_kcontrol *kcontrol,
                                return -EFAULT;
                } else {
                        if (copy_from_user(ac->params,
-                                          data + 2 * sizeof(u32), size))
+                                          data + 2, size))
                                return -EFAULT;
                }
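
The skl-topology hunk above corrects a copy_from_user() source offset: assuming data is a pointer to 32-bit words, as the corrected "+ 2" implies, the intent is to skip two words (8 bytes), while "+ 2 * sizeof(u32)" would skip 32 bytes because C pointer arithmetic is already scaled by the pointee size. A minimal illustration, with helper names hypothetical:

#include <linux/types.h>

/* pointer arithmetic is scaled by the pointee size */
static inline const u32 *skip_two_words(const u32 *p)
{
	return p + 2;			/* moves 2 * sizeof(u32) = 8 bytes */
}

static inline const u32 *skip_eight_words(const u32 *p)
{
	return p + 2 * sizeof(u32);	/* moves 8 * sizeof(u32) = 32 bytes */
}
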
 
index 190f868e78b24af37d8442d378c06aeb8ec6aaf2..fdecb70431745bf23c7113bf7abcede1cd5306ef 100644 (file)
@@ -133,7 +133,7 @@ static struct snd_soc_ops n810_ops = {
 static int n810_get_spk(struct snd_kcontrol *kcontrol,
                        struct snd_ctl_elem_value *ucontrol)
 {
-       ucontrol->value.integer.value[0] = n810_spk_func;
+       ucontrol->value.enumerated.item[0] = n810_spk_func;
 
        return 0;
 }
@@ -143,10 +143,10 @@ static int n810_set_spk(struct snd_kcontrol *kcontrol,
 {
        struct snd_soc_card *card =  snd_kcontrol_chip(kcontrol);
 
-       if (n810_spk_func == ucontrol->value.integer.value[0])
+       if (n810_spk_func == ucontrol->value.enumerated.item[0])
                return 0;
 
-       n810_spk_func = ucontrol->value.integer.value[0];
+       n810_spk_func = ucontrol->value.enumerated.item[0];
        n810_ext_control(&card->dapm);
 
        return 1;
@@ -155,7 +155,7 @@ static int n810_set_spk(struct snd_kcontrol *kcontrol,
 static int n810_get_jack(struct snd_kcontrol *kcontrol,
                         struct snd_ctl_elem_value *ucontrol)
 {
-       ucontrol->value.integer.value[0] = n810_jack_func;
+       ucontrol->value.enumerated.item[0] = n810_jack_func;
 
        return 0;
 }
@@ -165,10 +165,10 @@ static int n810_set_jack(struct snd_kcontrol *kcontrol,
 {
        struct snd_soc_card *card =  snd_kcontrol_chip(kcontrol);
 
-       if (n810_jack_func == ucontrol->value.integer.value[0])
+       if (n810_jack_func == ucontrol->value.enumerated.item[0])
                return 0;
 
-       n810_jack_func = ucontrol->value.integer.value[0];
+       n810_jack_func = ucontrol->value.enumerated.item[0];
        n810_ext_control(&card->dapm);
 
        return 1;
@@ -177,7 +177,7 @@ static int n810_set_jack(struct snd_kcontrol *kcontrol,
 static int n810_get_input(struct snd_kcontrol *kcontrol,
                          struct snd_ctl_elem_value *ucontrol)
 {
-       ucontrol->value.integer.value[0] = n810_dmic_func;
+       ucontrol->value.enumerated.item[0] = n810_dmic_func;
 
        return 0;
 }
@@ -187,10 +187,10 @@ static int n810_set_input(struct snd_kcontrol *kcontrol,
 {
        struct snd_soc_card *card =  snd_kcontrol_chip(kcontrol);
 
-       if (n810_dmic_func == ucontrol->value.integer.value[0])
+       if (n810_dmic_func == ucontrol->value.enumerated.item[0])
                return 0;
 
-       n810_dmic_func = ucontrol->value.integer.value[0];
+       n810_dmic_func = ucontrol->value.enumerated.item[0];
        n810_ext_control(&card->dapm);
 
        return 1;
index 5e21f08579d804c5f7895a22e87de6cdff8b79ef..54949242bc7075587e6a73a7235049db2e108734 100644 (file)
@@ -132,7 +132,7 @@ static struct snd_soc_ops rx51_ops = {
 static int rx51_get_spk(struct snd_kcontrol *kcontrol,
                        struct snd_ctl_elem_value *ucontrol)
 {
-       ucontrol->value.integer.value[0] = rx51_spk_func;
+       ucontrol->value.enumerated.item[0] = rx51_spk_func;
 
        return 0;
 }
@@ -142,10 +142,10 @@ static int rx51_set_spk(struct snd_kcontrol *kcontrol,
 {
        struct snd_soc_card *card = snd_kcontrol_chip(kcontrol);
 
-       if (rx51_spk_func == ucontrol->value.integer.value[0])
+       if (rx51_spk_func == ucontrol->value.enumerated.item[0])
                return 0;
 
-       rx51_spk_func = ucontrol->value.integer.value[0];
+       rx51_spk_func = ucontrol->value.enumerated.item[0];
        rx51_ext_control(&card->dapm);
 
        return 1;
@@ -180,7 +180,7 @@ static int rx51_hp_event(struct snd_soc_dapm_widget *w,
 static int rx51_get_input(struct snd_kcontrol *kcontrol,
                          struct snd_ctl_elem_value *ucontrol)
 {
-       ucontrol->value.integer.value[0] = rx51_dmic_func;
+       ucontrol->value.enumerated.item[0] = rx51_dmic_func;
 
        return 0;
 }
@@ -190,10 +190,10 @@ static int rx51_set_input(struct snd_kcontrol *kcontrol,
 {
        struct snd_soc_card *card = snd_kcontrol_chip(kcontrol);
 
-       if (rx51_dmic_func == ucontrol->value.integer.value[0])
+       if (rx51_dmic_func == ucontrol->value.enumerated.item[0])
                return 0;
 
-       rx51_dmic_func = ucontrol->value.integer.value[0];
+       rx51_dmic_func = ucontrol->value.enumerated.item[0];
        rx51_ext_control(&card->dapm);
 
        return 1;
@@ -202,7 +202,7 @@ static int rx51_set_input(struct snd_kcontrol *kcontrol,
 static int rx51_get_jack(struct snd_kcontrol *kcontrol,
                         struct snd_ctl_elem_value *ucontrol)
 {
-       ucontrol->value.integer.value[0] = rx51_jack_func;
+       ucontrol->value.enumerated.item[0] = rx51_jack_func;
 
        return 0;
 }
@@ -212,10 +212,10 @@ static int rx51_set_jack(struct snd_kcontrol *kcontrol,
 {
        struct snd_soc_card *card = snd_kcontrol_chip(kcontrol);
 
-       if (rx51_jack_func == ucontrol->value.integer.value[0])
+       if (rx51_jack_func == ucontrol->value.enumerated.item[0])
                return 0;
 
-       rx51_jack_func = ucontrol->value.integer.value[0];
+       rx51_jack_func = ucontrol->value.enumerated.item[0];
        rx51_ext_control(&card->dapm);
 
        return 1;
index c97dc13d36087628433de516a1b2217df02b492b..dcbb7aa9830c0347af6f7bd2c198639e52d9e24e 100644 (file)
@@ -163,7 +163,7 @@ static struct snd_soc_ops corgi_ops = {
 static int corgi_get_jack(struct snd_kcontrol *kcontrol,
        struct snd_ctl_elem_value *ucontrol)
 {
-       ucontrol->value.integer.value[0] = corgi_jack_func;
+       ucontrol->value.enumerated.item[0] = corgi_jack_func;
        return 0;
 }
 
@@ -172,10 +172,10 @@ static int corgi_set_jack(struct snd_kcontrol *kcontrol,
 {
        struct snd_soc_card *card = snd_kcontrol_chip(kcontrol);
 
-       if (corgi_jack_func == ucontrol->value.integer.value[0])
+       if (corgi_jack_func == ucontrol->value.enumerated.item[0])
                return 0;
 
-       corgi_jack_func = ucontrol->value.integer.value[0];
+       corgi_jack_func = ucontrol->value.enumerated.item[0];
        corgi_ext_control(&card->dapm);
        return 1;
 }
@@ -183,7 +183,7 @@ static int corgi_set_jack(struct snd_kcontrol *kcontrol,
 static int corgi_get_spk(struct snd_kcontrol *kcontrol,
        struct snd_ctl_elem_value *ucontrol)
 {
-       ucontrol->value.integer.value[0] = corgi_spk_func;
+       ucontrol->value.enumerated.item[0] = corgi_spk_func;
        return 0;
 }
 
@@ -192,10 +192,10 @@ static int corgi_set_spk(struct snd_kcontrol *kcontrol,
 {
        struct snd_soc_card *card =  snd_kcontrol_chip(kcontrol);
 
-       if (corgi_spk_func == ucontrol->value.integer.value[0])
+       if (corgi_spk_func == ucontrol->value.enumerated.item[0])
                return 0;
 
-       corgi_spk_func = ucontrol->value.integer.value[0];
+       corgi_spk_func = ucontrol->value.enumerated.item[0];
        corgi_ext_control(&card->dapm);
        return 1;
 }
index 241d0be42d7a0c6779bb6459c5165728cbedb302..62b8377a9d2b93c81f06a8df4c0b5ed2cd7a567f 100644 (file)
@@ -308,17 +308,17 @@ static int magician_set_spk(struct snd_kcontrol *kcontrol,
 static int magician_get_input(struct snd_kcontrol *kcontrol,
                              struct snd_ctl_elem_value *ucontrol)
 {
-       ucontrol->value.integer.value[0] = magician_in_sel;
+       ucontrol->value.enumerated.item[0] = magician_in_sel;
        return 0;
 }
 
 static int magician_set_input(struct snd_kcontrol *kcontrol,
                              struct snd_ctl_elem_value *ucontrol)
 {
-       if (magician_in_sel == ucontrol->value.integer.value[0])
+       if (magician_in_sel == ucontrol->value.enumerated.item[0])
                return 0;
 
-       magician_in_sel = ucontrol->value.integer.value[0];
+       magician_in_sel = ucontrol->value.enumerated.item[0];
 
        switch (magician_in_sel) {
        case MAGICIAN_MIC:
index 84d0e2e508088a260b54911bca03236a50046203..4b3b714f5ee7a67e5538b191a9da99bac86e56f8 100644 (file)
@@ -138,7 +138,7 @@ static struct snd_soc_ops poodle_ops = {
 static int poodle_get_jack(struct snd_kcontrol *kcontrol,
        struct snd_ctl_elem_value *ucontrol)
 {
-       ucontrol->value.integer.value[0] = poodle_jack_func;
+       ucontrol->value.enumerated.item[0] = poodle_jack_func;
        return 0;
 }
 
@@ -147,10 +147,10 @@ static int poodle_set_jack(struct snd_kcontrol *kcontrol,
 {
        struct snd_soc_card *card =  snd_kcontrol_chip(kcontrol);
 
-       if (poodle_jack_func == ucontrol->value.integer.value[0])
+       if (poodle_jack_func == ucontrol->value.enumerated.item[0])
                return 0;
 
-       poodle_jack_func = ucontrol->value.integer.value[0];
+       poodle_jack_func = ucontrol->value.enumerated.item[0];
        poodle_ext_control(&card->dapm);
        return 1;
 }
@@ -158,7 +158,7 @@ static int poodle_set_jack(struct snd_kcontrol *kcontrol,
 static int poodle_get_spk(struct snd_kcontrol *kcontrol,
        struct snd_ctl_elem_value *ucontrol)
 {
-       ucontrol->value.integer.value[0] = poodle_spk_func;
+       ucontrol->value.enumerated.item[0] = poodle_spk_func;
        return 0;
 }
 
@@ -167,10 +167,10 @@ static int poodle_set_spk(struct snd_kcontrol *kcontrol,
 {
        struct snd_soc_card *card =  snd_kcontrol_chip(kcontrol);
 
-       if (poodle_spk_func == ucontrol->value.integer.value[0])
+       if (poodle_spk_func == ucontrol->value.enumerated.item[0])
                return 0;
 
-       poodle_spk_func = ucontrol->value.integer.value[0];
+       poodle_spk_func = ucontrol->value.enumerated.item[0];
        poodle_ext_control(&card->dapm);
        return 1;
 }
index b00222620fd01be5cf00d4118d44f18832ad3bdc..0e02634c8b7f6f41764b75344ae850ff7ce586d7 100644 (file)
@@ -164,7 +164,7 @@ static struct snd_soc_ops spitz_ops = {
 static int spitz_get_jack(struct snd_kcontrol *kcontrol,
        struct snd_ctl_elem_value *ucontrol)
 {
-       ucontrol->value.integer.value[0] = spitz_jack_func;
+       ucontrol->value.enumerated.item[0] = spitz_jack_func;
        return 0;
 }
 
@@ -173,10 +173,10 @@ static int spitz_set_jack(struct snd_kcontrol *kcontrol,
 {
        struct snd_soc_card *card = snd_kcontrol_chip(kcontrol);
 
-       if (spitz_jack_func == ucontrol->value.integer.value[0])
+       if (spitz_jack_func == ucontrol->value.enumerated.item[0])
                return 0;
 
-       spitz_jack_func = ucontrol->value.integer.value[0];
+       spitz_jack_func = ucontrol->value.enumerated.item[0];
        spitz_ext_control(&card->dapm);
        return 1;
 }
@@ -184,7 +184,7 @@ static int spitz_set_jack(struct snd_kcontrol *kcontrol,
 static int spitz_get_spk(struct snd_kcontrol *kcontrol,
        struct snd_ctl_elem_value *ucontrol)
 {
-       ucontrol->value.integer.value[0] = spitz_spk_func;
+       ucontrol->value.enumerated.item[0] = spitz_spk_func;
        return 0;
 }
 
@@ -193,10 +193,10 @@ static int spitz_set_spk(struct snd_kcontrol *kcontrol,
 {
        struct snd_soc_card *card = snd_kcontrol_chip(kcontrol);
 
-       if (spitz_spk_func == ucontrol->value.integer.value[0])
+       if (spitz_spk_func == ucontrol->value.enumerated.item[0])
                return 0;
 
-       spitz_spk_func = ucontrol->value.integer.value[0];
+       spitz_spk_func = ucontrol->value.enumerated.item[0];
        spitz_ext_control(&card->dapm);
        return 1;
 }
index 49518dd642aa18ffd3f0004fe2f425210e820b54..c508f024ecfbc206887aeb9f91943311a279aed7 100644 (file)
@@ -95,7 +95,7 @@ static struct snd_soc_ops tosa_ops = {
 static int tosa_get_jack(struct snd_kcontrol *kcontrol,
        struct snd_ctl_elem_value *ucontrol)
 {
-       ucontrol->value.integer.value[0] = tosa_jack_func;
+       ucontrol->value.enumerated.item[0] = tosa_jack_func;
        return 0;
 }
 
@@ -104,10 +104,10 @@ static int tosa_set_jack(struct snd_kcontrol *kcontrol,
 {
        struct snd_soc_card *card = snd_kcontrol_chip(kcontrol);
 
-       if (tosa_jack_func == ucontrol->value.integer.value[0])
+       if (tosa_jack_func == ucontrol->value.enumerated.item[0])
                return 0;
 
-       tosa_jack_func = ucontrol->value.integer.value[0];
+       tosa_jack_func = ucontrol->value.enumerated.item[0];
        tosa_ext_control(&card->dapm);
        return 1;
 }
@@ -115,7 +115,7 @@ static int tosa_set_jack(struct snd_kcontrol *kcontrol,
 static int tosa_get_spk(struct snd_kcontrol *kcontrol,
        struct snd_ctl_elem_value *ucontrol)
 {
-       ucontrol->value.integer.value[0] = tosa_spk_func;
+       ucontrol->value.enumerated.item[0] = tosa_spk_func;
        return 0;
 }
 
@@ -124,10 +124,10 @@ static int tosa_set_spk(struct snd_kcontrol *kcontrol,
 {
        struct snd_soc_card *card = snd_kcontrol_chip(kcontrol);
 
-       if (tosa_spk_func == ucontrol->value.integer.value[0])
+       if (tosa_spk_func == ucontrol->value.enumerated.item[0])
                return 0;
 
-       tosa_spk_func = ucontrol->value.integer.value[0];
+       tosa_spk_func = ucontrol->value.enumerated.item[0];
        tosa_ext_control(&card->dapm);
        return 1;
 }
index 00b6c9d039cfada651dad6e917be9318dc4ce762..e5101e0d2d372262f8ecf7d0498240cba971b86f 100644 (file)
@@ -355,7 +355,6 @@ static struct regmap_config lpass_cpu_regmap_config = {
        .readable_reg = lpass_cpu_regmap_readable,
        .volatile_reg = lpass_cpu_regmap_volatile,
        .cache_type = REGCACHE_FLAT,
-       .val_format_endian = REGMAP_ENDIAN_LITTLE,
 };
 
 int asoc_qcom_lpass_cpu_platform_probe(struct platform_device *pdev)
index 84d9e77c0fbe1e8d5e737d26beb492c3ce85883b..70a2559b63f9050bcbb86049717518d022463e2f 100644 (file)
@@ -481,10 +481,11 @@ static int i2s_set_sysclk(struct snd_soc_dai *dai,
        unsigned int cdcon_mask = 1 << i2s_regs->cdclkcon_off;
        unsigned int rsrc_mask = 1 << i2s_regs->rclksrc_off;
        u32 mod, mask, val = 0;
+       unsigned long flags;
 
-       spin_lock(i2s->lock);
+       spin_lock_irqsave(i2s->lock, flags);
        mod = readl(i2s->addr + I2SMOD);
-       spin_unlock(i2s->lock);
+       spin_unlock_irqrestore(i2s->lock, flags);
 
        switch (clk_id) {
        case SAMSUNG_I2S_OPCLK:
@@ -575,11 +576,11 @@ static int i2s_set_sysclk(struct snd_soc_dai *dai,
                return -EINVAL;
        }
 
-       spin_lock(i2s->lock);
+       spin_lock_irqsave(i2s->lock, flags);
        mod = readl(i2s->addr + I2SMOD);
        mod = (mod & ~mask) | val;
        writel(mod, i2s->addr + I2SMOD);
-       spin_unlock(i2s->lock);
+       spin_unlock_irqrestore(i2s->lock, flags);
 
        return 0;
 }
@@ -590,6 +591,7 @@ static int i2s_set_fmt(struct snd_soc_dai *dai,
        struct i2s_dai *i2s = to_info(dai);
        int lrp_shift, sdf_shift, sdf_mask, lrp_rlow, mod_slave;
        u32 mod, tmp = 0;
+       unsigned long flags;
 
        lrp_shift = i2s->variant_regs->lrp_off;
        sdf_shift = i2s->variant_regs->sdf_off;
@@ -649,7 +651,7 @@ static int i2s_set_fmt(struct snd_soc_dai *dai,
                return -EINVAL;
        }
 
-       spin_lock(i2s->lock);
+       spin_lock_irqsave(i2s->lock, flags);
        mod = readl(i2s->addr + I2SMOD);
        /*
         * Don't change the I2S mode if any controller is active on this
@@ -657,7 +659,7 @@ static int i2s_set_fmt(struct snd_soc_dai *dai,
         */
        if (any_active(i2s) &&
                ((mod & (sdf_mask | lrp_rlow | mod_slave)) != tmp)) {
-               spin_unlock(i2s->lock);
+               spin_unlock_irqrestore(i2s->lock, flags);
                dev_err(&i2s->pdev->dev,
                                "%s:%d Other DAI busy\n", __func__, __LINE__);
                return -EAGAIN;
@@ -666,7 +668,7 @@ static int i2s_set_fmt(struct snd_soc_dai *dai,
        mod &= ~(sdf_mask | lrp_rlow | mod_slave);
        mod |= tmp;
        writel(mod, i2s->addr + I2SMOD);
-       spin_unlock(i2s->lock);
+       spin_unlock_irqrestore(i2s->lock, flags);
 
        return 0;
 }
@@ -676,6 +678,7 @@ static int i2s_hw_params(struct snd_pcm_substream *substream,
 {
        struct i2s_dai *i2s = to_info(dai);
        u32 mod, mask = 0, val = 0;
+       unsigned long flags;
 
        if (!is_secondary(i2s))
                mask |= (MOD_DC2_EN | MOD_DC1_EN);
@@ -744,11 +747,11 @@ static int i2s_hw_params(struct snd_pcm_substream *substream,
                return -EINVAL;
        }
 
-       spin_lock(i2s->lock);
+       spin_lock_irqsave(i2s->lock, flags);
        mod = readl(i2s->addr + I2SMOD);
        mod = (mod & ~mask) | val;
        writel(mod, i2s->addr + I2SMOD);
-       spin_unlock(i2s->lock);
+       spin_unlock_irqrestore(i2s->lock, flags);
 
        samsung_asoc_init_dma_data(dai, &i2s->dma_playback, &i2s->dma_capture);
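
The samsung/i2s hunks above convert plain spin_lock()/spin_unlock() around the I2SMOD read-modify-write sequences to the irqsave variants, so the update is also safe when the caller can be preempted by interrupt context. A minimal sketch of that pattern; the structure, register offset, and function names are hypothetical:

#include <linux/io.h>
#include <linux/spinlock.h>

#define DEMO_MODE_REG	0x04	/* hypothetical mode register offset */

struct demo_ctrl {
	spinlock_t lock;
	void __iomem *base;
};

static void demo_update_mode(struct demo_ctrl *ctrl, u32 mask, u32 val)
{
	unsigned long flags;
	u32 mod;

	/* irqsave: the read-modify-write must not race with interrupt context */
	spin_lock_irqsave(&ctrl->lock, flags);
	mod = readl(ctrl->base + DEMO_MODE_REG);
	mod = (mod & ~mask) | val;
	writel(mod, ctrl->base + DEMO_MODE_REG);
	spin_unlock_irqrestore(&ctrl->lock, flags);
}
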
 
index 0d37079879002d472c538b69ccc3321c0d7772ad..581175a51ecf730bb8c6a2271d137f049c2a445d 100644 (file)
@@ -3573,7 +3573,7 @@ static int snd_soc_dapm_dai_link_get(struct snd_kcontrol *kcontrol,
 {
        struct snd_soc_dapm_widget *w = snd_kcontrol_chip(kcontrol);
 
-       ucontrol->value.integer.value[0] = w->params_select;
+       ucontrol->value.enumerated.item[0] = w->params_select;
 
        return 0;
 }
@@ -3587,13 +3587,13 @@ static int snd_soc_dapm_dai_link_put(struct snd_kcontrol *kcontrol,
        if (w->power)
                return -EBUSY;
 
-       if (ucontrol->value.integer.value[0] == w->params_select)
+       if (ucontrol->value.enumerated.item[0] == w->params_select)
                return 0;
 
-       if (ucontrol->value.integer.value[0] >= w->num_params)
+       if (ucontrol->value.enumerated.item[0] >= w->num_params)
                return -EINVAL;
 
-       w->params_select = ucontrol->value.integer.value[0];
+       w->params_select = ucontrol->value.enumerated.item[0];
 
        return 0;
 }
index a11cfd20a6a0d2aa86b1d06b8552bc41a8bfd8c9..9102ae172d2a92e06ad6dc9155985b5103703a92 100644 (file)
@@ -1952,6 +1952,9 @@ static void grow_halt_poll_ns(struct kvm_vcpu *vcpu)
        else
                val *= halt_poll_ns_grow;
 
+       if (val > halt_poll_ns)
+               val = halt_poll_ns;
+
        vcpu->halt_poll_ns = val;
        trace_kvm_halt_poll_ns_grow(vcpu->vcpu_id, val, old);
 }
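
The final hunk bounds the exponential growth of a vCPU's halt-polling window so it never exceeds the module-wide halt_poll_ns limit. A minimal sketch of that grow-with-cap pattern; all names and the initial grant value are illustrative only:

/* grow a polling window geometrically, but never past the configured cap */
static unsigned int demo_grow_poll_ns(unsigned int cur, unsigned int factor,
				      unsigned int cap)
{
	unsigned int val;

	if (cur == 0)
		val = 10000;		/* illustrative first grant, in ns */
	else
		val = cur * factor;

	if (val > cap)
		val = cap;

	return val;
}
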