Merge branch 'akpm-current/current'
author     Stephen Rothwell <sfr@canb.auug.org.au>
           Tue, 13 Sep 2016 03:47:33 +0000 (13:47 +1000)
committer  Stephen Rothwell <sfr@canb.auug.org.au>
           Tue, 13 Sep 2016 03:47:33 +0000 (13:47 +1000)
25 files changed:
MAINTAINERS
arch/arm/kernel/smp.c
arch/arm64/kernel/process.c
arch/arm64/kernel/vmlinux.lds.S
arch/mips/cavium-octeon/setup.c
arch/x86/include/asm/smp.h
arch/x86/kernel/e820.c
arch/x86/kernel/process.c
arch/xtensa/kernel/vmlinux.lds.S
drivers/char/random.c
fs/nfs/internal.h
fs/proc/base.c
fs/proc/task_mmu.c
include/asm-generic/vmlinux.lds.h
include/linux/cpu.h
include/linux/random.h
include/linux/relay.h
include/linux/sched.h
init/main.c
kernel/fork.c
kernel/relay.c
mm/huge_memory.c
mm/page-writeback.c
mm/page_alloc.c
mm/slab.c

diff --combined MAINTAINERS
index a93d351ac6f8cb43a2295b29a04e28fc574bc037,cab827c8d09f50ce1cdab8d7d9031e810d842f82..388b572d43a8d21761d5456589969d22d8c586de
@@@ -636,15 -636,6 +636,15 @@@ F:       drivers/tty/serial/altera_jtaguart.
  F:    include/linux/altera_uart.h
  F:    include/linux/altera_jtaguart.h
  
 +AMAZON ETHERNET DRIVERS
 +M:    Netanel Belgazal <netanel@annapurnalabs.com>
 +R:    Saeed Bishara <saeed@annapurnalabs.com>
 +R:    Zorik Machulsky <zorik@annapurnalabs.com>
 +L:    netdev@vger.kernel.org
 +S:    Supported
 +F:    Documentation/networking/ena.txt
 +F:    drivers/net/ethernet/amazon/
 +
  AMD CRYPTOGRAPHIC COPROCESSOR (CCP) DRIVER
  M:    Tom Lendacky <thomas.lendacky@amd.com>
  M:    Gary Hook <gary.hook@amd.com>
@@@ -1001,7 -992,6 +1001,7 @@@ M:       Chen-Yu Tsai <wens@csie.org
  L:    linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)
  S:    Maintained
  N:    sun[x456789]i
 +F:    arch/arm/boot/dts/ntc-gr8*
  
  ARM/Allwinner SoC Clock Support
  M:    Emilio López <emilio@elopez.com.ar>
@@@ -1019,7 -1009,6 +1019,7 @@@ F:      arch/arm/mach-meson
  F:    arch/arm/boot/dts/meson*
  F:    arch/arm64/boot/dts/amlogic/
  F:    drivers/pinctrl/meson/
 +F:      drivers/mmc/host/meson*
  N:    meson
  
  ARM/Annapurna Labs ALPINE ARCHITECTURE
@@@ -1636,7 -1625,6 +1636,7 @@@ N:      rockchi
  ARM/SAMSUNG EXYNOS ARM ARCHITECTURES
  M:    Kukjin Kim <kgene@kernel.org>
  M:    Krzysztof Kozlowski <krzk@kernel.org>
 +R:    Javier Martinez Canillas <javier@osg.samsung.com>
  L:    linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)
  L:    linux-samsung-soc@vger.kernel.org (moderated for non-subscribers)
  S:    Maintained
@@@ -1688,6 -1676,14 +1688,6 @@@ S:     Maintaine
  F:    arch/arm/plat-samsung/s5p-dev-mfc.c
  F:    drivers/media/platform/s5p-mfc/
  
 -ARM/SAMSUNG S5P SERIES TV SUBSYSTEM SUPPORT
 -M:    Kyungmin Park <kyungmin.park@samsung.com>
 -M:    Tomasz Stanislawski <t.stanislaws@samsung.com>
 -L:    linux-arm-kernel@lists.infradead.org
 -L:    linux-media@vger.kernel.org
 -S:    Maintained
 -F:    drivers/media/platform/s5p-tv/
 -
  ARM/SAMSUNG S5P SERIES HDMI CEC SUBSYSTEM SUPPORT
  M:    Kyungmin Park <kyungmin.park@samsung.com>
  L:    linux-arm-kernel@lists.infradead.org
@@@ -1845,7 -1841,6 +1845,7 @@@ F:      arch/arm64/boot/dts/socionext
  F:    drivers/bus/uniphier-system-bus.c
  F:    drivers/i2c/busses/i2c-uniphier*
  F:    drivers/pinctrl/uniphier/
 +F:    drivers/reset/reset-uniphier.c
  F:    drivers/tty/serial/8250/8250_uniphier.c
  N:    uniphier
  
@@@ -1980,13 -1975,6 +1980,13 @@@ S:    Maintaine
  F:    drivers/media/i2c/as3645a.c
  F:    include/media/i2c/as3645a.h
  
 +ASAHI KASEI AK8974 DRIVER
 +M:    Linus Walleij <linus.walleij@linaro.org>
 +L:    linux-iio@vger.kernel.org
 +W:    http://www.akm.com/
 +S:    Supported
 +F:    drivers/iio/magnetometer/ak8974.c
 +
  ASC7621 HARDWARE MONITOR DRIVER
  M:    George Joseph <george.joseph@fairview5.com>
  L:    linux-hwmon@vger.kernel.org
@@@ -2234,9 -2222,9 +2234,9 @@@ S:      Maintaine
  F:    drivers/net/wireless/atmel/atmel*
  
  ATMEL MAXTOUCH DRIVER
 -M:    Nick Dyer <nick.dyer@itdev.co.uk>
 -T:    git git://github.com/atmel-maxtouch/linux.git
 -S:    Supported
 +M:    Nick Dyer <nick@shmanahar.org>
 +T:    git git://github.com/ndyer/linux.git
 +S:    Maintained
  F:    Documentation/devicetree/bindings/input/atmel,maxtouch.txt
  F:    drivers/input/touchscreen/atmel_mxt_ts.c
  F:    include/linux/platform_data/atmel_mxt_ts.h
@@@ -2497,7 -2485,7 +2497,7 @@@ F:      include/net/bluetooth
  BONDING DRIVER
  M:    Jay Vosburgh <j.vosburgh@gmail.com>
  M:    Veaceslav Falico <vfalico@gmail.com>
 -M:    Andy Gospodarek <gospo@cumulusnetworks.com>
 +M:    Andy Gospodarek <andy@greyhouse.net>
  L:    netdev@vger.kernel.org
  W:    http://sourceforge.net/projects/bonding/
  S:    Supported
@@@ -2900,14 -2888,6 +2900,14 @@@ S:    Maintaine
  F:    drivers/iio/light/cm*
  F:    Documentation/devicetree/bindings/i2c/trivial-devices.txt
  
 +CAVIUM I2C DRIVER
 +M:    Jan Glauber <jglauber@cavium.com>
 +M:    David Daney <david.daney@cavium.com>
 +W:    http://www.cavium.com
 +S:    Supported
 +F:    drivers/i2c/busses/i2c-octeon*
 +F:    drivers/i2c/busses/i2c-thunderx*
 +
  CAVIUM LIQUIDIO NETWORK DRIVER
  M:     Derek Chickles <derek.chickles@caviumnetworks.com>
  M:     Satanand Burla <satananda.burla@caviumnetworks.com>
@@@ -3155,7 -3135,7 +3155,7 @@@ L:      cocci@systeme.lip6.fr (moderated fo
  T:    git git://git.kernel.org/pub/scm/linux/kernel/git/mmarek/kbuild.git misc
  W:    http://coccinelle.lip6.fr/
  S:    Supported
 -F:    Documentation/coccinelle.txt
 +F:    Documentation/dev-tools/coccinelle.rst
  F:    scripts/coccinelle/
  F:    scripts/coccicheck
  
@@@ -3181,7 -3161,6 +3181,7 @@@ COMMON CLK FRAMEWOR
  M:    Michael Turquette <mturquette@baylibre.com>
  M:    Stephen Boyd <sboyd@codeaurora.org>
  L:    linux-clk@vger.kernel.org
 +Q:    http://patchwork.kernel.org/project/linux-clk/list/
  T:    git git://git.kernel.org/pub/scm/linux/kernel/git/clk/linux.git
  S:    Maintained
  F:    Documentation/devicetree/bindings/clock/
@@@ -3290,7 -3269,7 +3290,7 @@@ S:      Maintaine
  F:    drivers/net/wan/cosa*
  
  CPMAC ETHERNET DRIVER
 -M:    Florian Fainelli <florian@openwrt.org>
 +M:    Florian Fainelli <f.fainelli@gmail.com>
  L:    netdev@vger.kernel.org
  S:    Maintained
  F:    drivers/net/ethernet/ti/cpmac.c
@@@ -3783,8 -3762,8 +3783,8 @@@ F:      drivers/leds/leds-da90??.
  F:    drivers/mfd/da903x.c
  F:    drivers/mfd/da90??-*.c
  F:    drivers/mfd/da91??-*.c
 -F:    drivers/power/da9052-battery.c
 -F:    drivers/power/da91??-*.c
 +F:    drivers/power/supply/da9052-battery.c
 +F:    drivers/power/supply/da91??-*.c
  F:    drivers/regulator/da903x.c
  F:    drivers/regulator/da9???-regulator.[ch]
  F:    drivers/rtc/rtc-da90??.c
@@@ -3800,12 -3779,6 +3800,12 @@@ F:    include/linux/regulator/da9211.
  F:    include/sound/da[79]*.h
  F:    sound/soc/codecs/da[79]*.[ch]
  
 +DIAMOND SYSTEMS GPIO-MM GPIO DRIVER
 +M:    William Breathitt Gray <vilhelm.gray@gmail.com>
 +L:    linux-gpio@vger.kernel.org
 +S:    Maintained
 +F:    drivers/gpio/gpio-gpio-mm.c
 +
  DIGI NEO AND CLASSIC PCI PRODUCTS
  M:    Lidza Louina <lidza.louina@gmail.com>
  M:    Mark Hounschell <markh@compro.net>
@@@ -4101,14 -4074,6 +4101,14 @@@ S:    Orphan / Obsolet
  F:    drivers/gpu/drm/i810/
  F:    include/uapi/drm/i810_drm.h
  
 +DRM DRIVERS FOR MEDIATEK
 +M:    CK Hu <ck.hu@mediatek.com>
 +M:    Philipp Zabel <p.zabel@pengutronix.de>
 +L:    dri-devel@lists.freedesktop.org
 +S:    Supported
 +F:    drivers/gpu/drm/mediatek/
 +F:    Documentation/devicetree/bindings/display/mediatek/
 +
  DRM DRIVER FOR MSM ADRENO GPU
  M:    Rob Clark <robdclark@gmail.com>
  L:    linux-arm-msm@vger.kernel.org
@@@ -4439,6 -4404,7 +4439,6 @@@ F:      Documentation/filesystems/ecryptfs.t
  F:    fs/ecryptfs/
  
  EDAC-CORE
 -M:    Doug Thompson <dougthompson@xmission.com>
  M:    Borislav Petkov <bp@alien8.de>
  M:    Mauro Carvalho Chehab <mchehab@s-opensource.com>
  M:    Mauro Carvalho Chehab <mchehab@kernel.org>
@@@ -4451,12 -4417,14 +4451,12 @@@ F:   drivers/edac
  F:    include/linux/edac.h
  
  EDAC-AMD64
 -M:    Doug Thompson <dougthompson@xmission.com>
  M:    Borislav Petkov <bp@alien8.de>
  L:    linux-edac@vger.kernel.org
  S:    Maintained
  F:    drivers/edac/amd64_edac*
  
  EDAC-CALXEDA
 -M:    Doug Thompson <dougthompson@xmission.com>
  M:    Robert Richter <rric@kernel.org>
  L:    linux-edac@vger.kernel.org
  S:    Maintained
@@@ -4472,21 -4440,17 +4472,21 @@@ F:   drivers/edac/octeon_edac
  
  EDAC-E752X
  M:    Mark Gross <mark.gross@intel.com>
 -M:    Doug Thompson <dougthompson@xmission.com>
  L:    linux-edac@vger.kernel.org
  S:    Maintained
  F:    drivers/edac/e752x_edac.c
  
  EDAC-E7XXX
 -M:    Doug Thompson <dougthompson@xmission.com>
  L:    linux-edac@vger.kernel.org
  S:    Maintained
  F:    drivers/edac/e7xxx_edac.c
  
 +EDAC-FSL_DDR
 +M:    York Sun <york.sun@nxp.com>
 +L:    linux-edac@vger.kernel.org
 +S:    Maintained
 +F:    drivers/edac/fsl_ddr_edac.*
 +
  EDAC-GHES
  M:    Mauro Carvalho Chehab <mchehab@s-opensource.com>
  M:    Mauro Carvalho Chehab <mchehab@kernel.org>
@@@ -4501,11 -4465,13 +4501,11 @@@ S:   Maintaine
  F:    drivers/edac/i82443bxgx_edac.c
  
  EDAC-I3000
 -M:    Jason Uhlenkott <juhlenko@akamai.com>
  L:    linux-edac@vger.kernel.org
 -S:    Maintained
 +S:    Orphan
  F:    drivers/edac/i3000_edac.c
  
  EDAC-I5000
 -M:    Doug Thompson <dougthompson@xmission.com>
  L:    linux-edac@vger.kernel.org
  S:    Maintained
  F:    drivers/edac/i5000_edac.c
@@@ -5169,7 -5135,7 +5169,7 @@@ GCOV BASED KERNEL PROFILIN
  M:    Peter Oberparleiter <oberpar@linux.vnet.ibm.com>
  S:    Maintained
  F:    kernel/gcov/
 -F:    Documentation/gcov.txt
 +F:    Documentation/dev-tools/gcov.rst
  
  GDT SCSI DISK ARRAY CONTROLLER DRIVER
  M:    Achim Leubner <achim_leubner@adaptec.com>
@@@ -5608,14 -5574,6 +5608,14 @@@ S:    Maintaine
  F:    drivers/net/ethernet/hisilicon/
  F:    Documentation/devicetree/bindings/net/hisilicon*.txt
  
 +HISILICON ROCE DRIVER
 +M:    Lijun Ou <oulijun@huawei.com>
 +M:    Wei Hu(Xavier) <xavier.huwei@huawei.com>
 +L:    linux-rdma@vger.kernel.org
 +S:    Maintained
 +F:    drivers/infiniband/hw/hns/
 +F:    Documentation/devicetree/bindings/infiniband/hisilicon-hns-roce.txt
 +
  HISILICON SAS Controller
  M:    John Garry <john.garry@huawei.com>
  W:    http://www.hisilicon.com
@@@ -5693,14 -5651,6 +5693,14 @@@ M:    Nadia Yvette Chambers <nyc@holomorph
  S:    Maintained
  F:    fs/hugetlbfs/
  
 +HVA ST MEDIA DRIVER
 +M:    Jean-Christophe Trotin <jean-christophe.trotin@st.com>
 +L:    linux-media@vger.kernel.org
 +T:    git git://linuxtv.org/media_tree.git
 +W:    https://linuxtv.org
 +S:    Supported
 +F:    drivers/media/platform/sti/hva
 +
  Hyper-V CORE AND DRIVERS
  M:    "K. Y. Srinivasan" <kys@microsoft.com>
  M:    Haiyang Zhang <haiyangz@microsoft.com>
@@@ -5727,8 -5677,6 +5727,8 @@@ S:      Maintaine
  F:    Documentation/i2c/i2c-topology
  F:    Documentation/i2c/muxes/
  F:    Documentation/devicetree/bindings/i2c/i2c-mux*
 +F:    Documentation/devicetree/bindings/i2c/i2c-arb*
 +F:    Documentation/devicetree/bindings/i2c/i2c-gate*
  F:    drivers/i2c/i2c-mux.c
  F:    drivers/i2c/muxes/
  F:    include/linux/i2c-mux.h
@@@ -6045,12 -5993,6 +6045,12 @@@ M:    Zubair Lutfullah Kakakhel <Zubair.Ka
  S:    Maintained
  F:    drivers/dma/dma-jz4780.c
  
 +INGENIC JZ4780 NAND DRIVER
 +M:    Harvey Hunt <harveyhuntnexus@gmail.com>
 +L:    linux-mtd@lists.infradead.org
 +S:    Maintained
 +F:    drivers/mtd/nand/jz4780_*
 +
  INTEGRITY MEASUREMENT ARCHITECTURE (IMA)
  M:    Mimi Zohar <zohar@linux.vnet.ibm.com>
  M:    Dmitry Kasatkin <dmitry.kasatkin@gmail.com>
@@@ -6152,13 -6094,6 +6152,13 @@@ T:    git git://git.kernel.org/pub/scm/lin
  S:    Supported
  F:    drivers/idle/intel_idle.c
  
 +INTEL INTEGRATED SENSOR HUB DRIVER
 +M:    Srinivas Pandruvada <srinivas.pandruvada@linux.intel.com>
 +M:    Jiri Kosina <jikos@kernel.org>
 +L:    linux-input@vger.kernel.org
 +S:    Maintained
 +F:    drivers/hid/intel-ish-hid/
 +
  INTEL PSTATE DRIVER
  M:    Srinivas Pandruvada <srinivas.pandruvada@linux.intel.com>
  M:    Len Brown <lenb@kernel.org>
@@@ -6167,7 -6102,7 +6167,7 @@@ S:      Supporte
  F:    drivers/cpufreq/intel_pstate.c
  
  INTEL FRAMEBUFFER DRIVER (excluding 810 and 815)
- M:    Maik Broemme <mbroemme@plusserver.de>
+ M:    Maik Broemme <mbroemme@libmpq.org>
  L:    linux-fbdev@vger.kernel.org
  S:    Maintained
  F:    Documentation/fb/intelfb.txt
@@@ -6669,7 -6604,7 +6669,7 @@@ L:      kasan-dev@googlegroups.co
  S:    Maintained
  F:    arch/*/include/asm/kasan.h
  F:    arch/*/mm/kasan_init*
 -F:    Documentation/kasan.txt
 +F:    Documentation/dev-tools/kasan.rst
  F:    include/linux/kasan*.h
  F:    lib/test_kasan.c
  F:    mm/kasan/
@@@ -6885,7 -6820,7 +6885,7 @@@ KMEMCHEC
  M:    Vegard Nossum <vegardno@ifi.uio.no>
  M:    Pekka Enberg <penberg@kernel.org>
  S:    Maintained
 -F:    Documentation/kmemcheck.txt
 +F:    Documentation/dev-tools/kmemcheck.rst
  F:    arch/x86/include/asm/kmemcheck.h
  F:    arch/x86/mm/kmemcheck/
  F:    include/linux/kmemcheck.h
@@@ -6894,7 -6829,7 +6894,7 @@@ F:      mm/kmemcheck.
  KMEMLEAK
  M:    Catalin Marinas <catalin.marinas@arm.com>
  S:    Maintained
 -F:    Documentation/kmemleak.txt
 +F:    Documentation/dev-tools/kmemleak.rst
  F:    include/linux/kmemleak.h
  F:    mm/kmemleak.c
  F:    mm/kmemleak-test.c
@@@ -7507,8 -7442,9 +7507,8 @@@ F:      Documentation/hwmon/max2075
  F:    drivers/hwmon/max20751.c
  
  MAX6650 HARDWARE MONITOR AND FAN CONTROLLER DRIVER
 -M:    "Hans J. Koch" <hjk@hansjkoch.de>
  L:    linux-hwmon@vger.kernel.org
 -S:    Maintained
 +S:    Orphan
  F:    Documentation/hwmon/max6650
  F:    drivers/hwmon/max6650.c
  
@@@ -7533,8 -7469,8 +7533,8 @@@ M:      Krzysztof Kozlowski <krzk@kernel.org
  M:    Bartlomiej Zolnierkiewicz <b.zolnierkie@samsung.com>
  L:    linux-pm@vger.kernel.org
  S:    Supported
 -F:    drivers/power/max14577_charger.c
 -F:    drivers/power/max77693_charger.c
 +F:    drivers/power/supply/max14577_charger.c
 +F:    drivers/power/supply/max77693_charger.c
  
  MAXIM MAX77802 MULTIFUNCTION PMIC DEVICE DRIVERS
  M:    Javier Martinez Canillas <javier@osg.samsung.com>
@@@ -7579,12 -7515,6 +7579,12 @@@ L:    linux-iio@vger.kernel.or
  S:    Maintained
  F:    drivers/iio/potentiometer/mcp4531.c
  
 +MEASUREMENT COMPUTING CIO-DAC IIO DRIVER
 +M:    William Breathitt Gray <vilhelm.gray@gmail.com>
 +L:    linux-iio@vger.kernel.org
 +S:    Maintained
 +F:    drivers/iio/dac/cio-dac.c
 +
  MEDIA DRIVERS FOR RENESAS - FCP
  M:    Laurent Pinchart <laurent.pinchart@ideasonboard.com>
  L:    linux-media@vger.kernel.org
@@@ -7737,14 -7667,6 +7737,14 @@@ W:    http://www.mellanox.co
  Q:    http://patchwork.ozlabs.org/project/netdev/list/
  F:    drivers/net/ethernet/mellanox/mlxsw/
  
 +MELLANOX MLXCPLD LED DRIVER
 +M:    Vadim Pasternak <vadimp@mellanox.com>
 +L:    linux-leds@vger.kernel.org
 +S:    Supported
 +W:    http://www.mellanox.com
 +F:    drivers/leds/leds-mlxcpld.c
 +F:    Documentation/leds/leds-mlxcpld.txt
 +
  SOFT-ROCE DRIVER (rxe)
  M:    Moni Shoua <monis@mellanox.com>
  L:    linux-rdma@vger.kernel.org
@@@ -7830,14 -7752,6 +7830,14 @@@ T:    git git://git.monstr.eu/linux-2.6-mi
  S:    Supported
  F:    arch/microblaze/
  
 +MICROCHIP / ATMEL ISC DRIVER
 +M:    Songjun Wu <songjun.wu@microchip.com>
 +L:    linux-media@vger.kernel.org
 +S:    Supported
 +F:    drivers/media/platform/atmel/atmel-isc.c
 +F:    drivers/media/platform/atmel/atmel-isc-regs.h
 +F:    devicetree/bindings/media/atmel-isc.txt
 +
  MICROSOFT SURFACE PRO 3 BUTTON DRIVER
  M:    Chen Yu <yu.c.chen@intel.com>
  L:    platform-driver-x86@vger.kernel.org
@@@ -7924,18 -7838,6 +7924,18 @@@ W:    http://www.melexis.co
  S:    Supported
  F:    drivers/iio/temperature/mlx90614.c
  
 +MICROSEMI SMART ARRAY SMARTPQI DRIVER (smartpqi)
 +M:    Don Brace <don.brace@microsemi.com>
 +L:    esc.storagedev@microsemi.com
 +L:    linux-scsi@vger.kernel.org
 +S:    Supported
 +F:    drivers/scsi/smartpqi/smartpqi*.[ch]
 +F:    drivers/scsi/smartpqi/Kconfig
 +F:    drivers/scsi/smartpqi/Makefile
 +F:    include/linux/cciss*.h
 +F:    include/uapi/linux/cciss*.h
 +F:    Documentation/scsi/smartpqi.txt
 +
  MN88472 MEDIA DRIVER
  M:    Antti Palosaari <crope@iki.fi>
  L:    linux-media@vger.kernel.org
@@@ -8061,7 -7963,6 +8061,7 @@@ MULTIFUNCTION DEVICES (MFD
  M:    Lee Jones <lee.jones@linaro.org>
  T:    git git://git.kernel.org/pub/scm/linux/kernel/git/lee/mfd.git
  S:    Supported
 +F:    Documentation/devicetree/bindings/mfd/
  F:    drivers/mfd/
  F:    include/linux/mfd/
  
@@@ -8441,11 -8342,11 +8441,11 @@@ R:   Pali Rohár <pali.rohar@gmail.com
  F:    include/linux/power/bq2415x_charger.h
  F:    include/linux/power/bq27xxx_battery.h
  F:    include/linux/power/isp1704_charger.h
 -F:    drivers/power/bq2415x_charger.c
 -F:    drivers/power/bq27xxx_battery.c
 -F:    drivers/power/bq27xxx_battery_i2c.c
 -F:    drivers/power/isp1704_charger.c
 -F:    drivers/power/rx51_battery.c
 +F:    drivers/power/supply/bq2415x_charger.c
 +F:    drivers/power/supply/bq27xxx_battery.c
 +F:    drivers/power/supply/bq27xxx_battery_i2c.c
 +F:    drivers/power/supply/isp1704_charger.c
 +F:    drivers/power/supply/rx51_battery.c
  
  NTB DRIVER CORE
  M:    Jon Mason <jdmason@kudzu.us>
@@@ -8946,7 -8847,6 +8946,7 @@@ S:      Supporte
  F:    Documentation/virtual/paravirt_ops.txt
  F:    arch/*/kernel/paravirt*
  F:    arch/*/include/asm/paravirt.h
 +F:    include/linux/hypervisor.h
  
  PARIDE DRIVERS FOR PARALLEL PORT IDE DEVICES
  M:    Tim Waugh <tim@cyberelk.net>
@@@ -9445,12 -9345,16 +9445,12 @@@ F:   drivers/powercap
  
  POWER SUPPLY CLASS/SUBSYSTEM and DRIVERS
  M:    Sebastian Reichel <sre@kernel.org>
 -M:    Dmitry Eremin-Solenikov <dbaryshkov@gmail.com>
 -M:    David Woodhouse <dwmw2@infradead.org>
  L:    linux-pm@vger.kernel.org
 -T:    git git://git.infradead.org/battery-2.6.git
 +T:    git git://git.kernel.org/pub/scm/linux/kernel/git/sre/linux-power-supply.git
  S:    Maintained
 -F:    Documentation/devicetree/bindings/power/
 -F:    Documentation/devicetree/bindings/power_supply/
 +F:    Documentation/devicetree/bindings/power/supply/
  F:    include/linux/power_supply.h
 -F:    drivers/power/
 -X:    drivers/power/avs/
 +F:    drivers/power/supply/
  
  POWER STATE COORDINATION INTERFACE (PSCI)
  M:    Mark Rutland <mark.rutland@arm.com>
@@@ -9786,12 -9690,6 +9786,12 @@@ T:    git git://git.kernel.org/pub/scm/lin
  S:    Supported
  F:    drivers/net/wireless/ath/ath10k/
  
 +QUALCOMM EMAC GIGABIT ETHERNET DRIVER
 +M:    Timur Tabi <timur@codeaurora.org>
 +L:    netdev@vger.kernel.org
 +S:    Supported
 +F:    drivers/net/ethernet/qualcomm/emac/
 +
  QUALCOMM HEXAGON ARCHITECTURE
  M:    Richard Kuo <rkuo@codeaurora.org>
  L:    linux-hexagon@vger.kernel.org
@@@ -10012,12 -9910,6 +10012,12 @@@ F:  drivers/rpmsg
  F:    Documentation/rpmsg.txt
  F:    include/linux/rpmsg.h
  
 +RENESAS CLOCK DRIVERS
 +M:    Geert Uytterhoeven <geert+renesas@glider.be>
 +L:    linux-renesas-soc@vger.kernel.org
 +S:    Supported
 +F:    drivers/clk/renesas/
 +
  RENESAS ETHERNET DRIVERS
  R:    Sergei Shtylyov <sergei.shtylyov@cogentembedded.com>
  L:    netdev@vger.kernel.org
@@@ -10053,7 -9945,6 +10053,7 @@@ F:    net/rfkill
  
  RHASHTABLE
  M:    Thomas Graf <tgraf@suug.ch>
 +M:    Herbert Xu <herbert@gondor.apana.org.au>
  L:    netdev@vger.kernel.org
  S:    Maintained
  F:    lib/rhashtable.c
@@@ -10462,8 -10353,8 +10462,8 @@@ F:   drivers/thunderbolt
  TI BQ27XXX POWER SUPPLY DRIVER
  R:    Andrew F. Davis <afd@ti.com>
  F:    include/linux/power/bq27xxx_battery.h
 -F:    drivers/power/bq27xxx_battery.c
 -F:    drivers/power/bq27xxx_battery_i2c.c
 +F:    drivers/power/supply/bq27xxx_battery.c
 +F:    drivers/power/supply/bq27xxx_battery_i2c.c
  
  TIMEKEEPING, CLOCKSOURCE CORE, NTP, ALARMTIMER
  M:    John Stultz <john.stultz@linaro.org>
@@@ -10691,12 -10582,12 +10691,12 @@@ S:        Maintaine
  F:    drivers/misc/phantom.c
  F:    include/uapi/linux/phantom.h
  
 -SERVER ENGINES 10Gbps iSCSI - BladeEngine 2 DRIVER
 -M:    Jayamohan Kallickal <jayamohan.kallickal@avagotech.com>
 -M:    Ketan Mukadam <ketan.mukadam@avagotech.com>
 -M:    John Soni Jose <sony.john@avagotech.com>
 +Emulex 10Gbps iSCSI - OneConnect DRIVER
 +M:    Subbu Seetharaman <subbu.seetharaman@broadcom.com>
 +M:    Ketan Mukadam <ketan.mukadam@broadcom.com>
 +M:    Jitendra Bhivare <jitendra.bhivare@broadcom.com>
  L:    linux-scsi@vger.kernel.org
 -W:    http://www.avagotech.com
 +W:    http://www.broadcom.com
  S:    Supported
  F:    drivers/scsi/be2iscsi/
  
@@@ -11223,7 -11114,6 +11223,7 @@@ F:   Documentation/spi
  F:    drivers/spi/
  F:    include/linux/spi/
  F:    include/uapi/linux/spi/
 +F:    tools/spi/
  
  SPIDERNET NETWORK DRIVER for CELL
  M:    Ishizaki Kou <kou.ishizaki@toshiba.co.jp>
@@@ -11294,7 -11184,6 +11294,7 @@@ F:   drivers/staging/media/lirc
  STAGING - LUSTRE PARALLEL FILESYSTEM
  M:    Oleg Drokin <oleg.drokin@intel.com>
  M:    Andreas Dilger <andreas.dilger@intel.com>
 +M:    James Simmons <jsimmons@infradead.org>
  L:    lustre-devel@lists.lustre.org (moderated for non-subscribers)
  W:    http://wiki.lustre.org/
  S:    Maintained
@@@ -11321,6 -11210,13 +11321,6 @@@ M:  Florian Schilhabel <florian.c.schilh
  S:    Odd Fixes
  F:    drivers/staging/rtl8712/
  
 -STAGING - REALTEK RTL8723U WIRELESS DRIVER
 -M:    Larry Finger <Larry.Finger@lwfinger.net>
 -M:    Jes Sorensen <Jes.Sorensen@redhat.com>
 -L:    linux-wireless@vger.kernel.org
 -S:    Maintained
 -F:    drivers/staging/rtl8723au/
 -
  STAGING - SILICON MOTION SM750 FRAME BUFFER DRIVER
  M:    Sudip Mukherjee <sudipm.mukherjee@gmail.com>
  M:    Teddy Wang <teddy.wang@siliconmotion.com>
@@@ -11460,14 -11356,6 +11460,14 @@@ T: git git://git.kernel.org/pub/scm/lin
  S:    Supported
  F:    drivers/mfd/syscon.c
  
 +SYSTEM RESET/SHUTDOWN DRIVERS
 +M:    Sebastian Reichel <sre@kernel.org>
 +L:    linux-pm@vger.kernel.org
 +T:    git git://git.kernel.org/pub/scm/linux/kernel/git/sre/linux-power-supply.git
 +S:    Maintained
 +F:    Documentation/devicetree/bindings/power/reset/
 +F:    drivers/power/reset/
 +
  SYSV FILESYSTEM
  M:    Christoph Hellwig <hch@infradead.org>
  S:    Maintained
@@@ -11816,7 -11704,7 +11816,7 @@@ F:   include/linux/platform_data/lp855x.
  TI LP8727 CHARGER DRIVER
  M:    Milo Kim <milo.kim@ti.com>
  S:    Maintained
 -F:    drivers/power/lp8727_charger.c
 +F:    drivers/power/supply/lp8727_charger.c
  F:    include/linux/platform_data/lp8727.h
  
  TI LP8788 MFD DRIVER
@@@ -11825,7 -11713,7 +11825,7 @@@ S:   Maintaine
  F:    drivers/iio/adc/lp8788_adc.c
  F:    drivers/leds/leds-lp8788.c
  F:    drivers/mfd/lp8788*.c
 -F:    drivers/power/lp8788-charger.c
 +F:    drivers/power/supply/lp8788-charger.c
  F:    drivers/regulator/lp8788-*.c
  F:    include/linux/mfd/lp8788*.h
  
@@@ -11978,14 -11866,6 +11978,14 @@@ T: git git://linuxtv.org/media_tree.gi
  S:    Odd fixes
  F:    drivers/media/usb/tm6000/
  
 +TW5864 VIDEO4LINUX DRIVER
 +M:    Bluecherry Maintainers <maintainers@bluecherrydvr.com>
 +M:    Andrey Utkin <andrey.utkin@corp.bluecherry.net>
 +M:    Andrey Utkin <andrey_utkin@fastmail.com>
 +L:    linux-media@vger.kernel.org
 +S:    Supported
 +F:    drivers/media/pci/tw5864/
 +
  TW68 VIDEO4LINUX DRIVER
  M:    Hans Verkuil <hverkuil@xs4all.nl>
  L:    linux-media@vger.kernel.org
@@@ -12303,7 -12183,7 +12303,7 @@@ S:   Maintaine
  F:    drivers/net/usb/lan78xx.*
  
  USB MASS STORAGE DRIVER
 -M:    Matthew Dharm <mdharm-usb@one-eyed-alien.net>
 +M:    Alan Stern <stern@rowland.harvard.edu>
  L:    linux-usb@vger.kernel.org
  L:    usb-storage@lists.one-eyed-alien.net
  S:    Maintained
@@@ -12400,7 -12280,6 +12400,7 @@@ F:   drivers/net/usb/smsc75xx.
  
  USB SMSC95XX ETHERNET DRIVER
  M:    Steve Glendinning <steve.glendinning@shawell.net>
 +M:    Microchip Linux Driver Support <UNGLinuxDriver@microchip.com>
  L:    netdev@vger.kernel.org
  S:    Maintained
  F:    drivers/net/usb/smsc95xx.*
@@@ -12507,6 -12386,7 +12507,6 @@@ F:   fs/hostfs
  F:    fs/hppfs/
  
  USERSPACE I/O (UIO)
 -M:    "Hans J. Koch" <hjk@hansjkoch.de>
  M:    Greg Kroah-Hartman <gregkh@linuxfoundation.org>
  S:    Maintained
  T:    git git://git.kernel.org/pub/scm/linux/kernel/git/gregkh/char-misc.git
@@@ -12688,7 -12568,7 +12688,7 @@@ F:   include/linux/if_*vlan.
  F:    net/8021q/
  
  VLYNQ BUS
- M:    Florian Fainelli <florian@openwrt.org>
+ M:    Florian Fainelli <f.fainelli@gmail.com>
  L:    openwrt-devel@lists.openwrt.org (subscribers-only)
  S:    Maintained
  F:    drivers/vlynq/vlynq.c
@@@ -12909,7 -12789,7 +12909,7 @@@ F:   drivers/input/touchscreen/wm97*.
  F:    drivers/mfd/arizona*
  F:    drivers/mfd/wm*.c
  F:    drivers/mfd/cs47l24*
 -F:    drivers/power/wm83*.c
 +F:    drivers/power/supply/wm83*.c
  F:    drivers/rtc/rtc-wm83*.c
  F:    drivers/regulator/wm8*.c
  F:    drivers/video/backlight/wm83*_bl.c
diff --combined arch/arm/kernel/smp.c
index 937c8920d741485a8992209a778a1ae385a93408,173ad30c20e0f7d4c6a402b82b532de8f6ca28a8..7dd14e8395e62976b3083f677b0e4d2ea0d71d95
@@@ -82,7 -82,7 +82,7 @@@ enum ipi_msg_type 
  
  static DECLARE_COMPLETION(cpu_running);
  
 -static struct smp_operations smp_ops;
 +static struct smp_operations smp_ops __ro_after_init;
  
  void __init smp_set_ops(const struct smp_operations *ops)
  {
@@@ -748,19 -748,10 +748,10 @@@ core_initcall(register_cpufreq_notifier
  
  static void raise_nmi(cpumask_t *mask)
  {
-       /*
-        * Generate the backtrace directly if we are running in a calling
-        * context that is not preemptible by the backtrace IPI. Note
-        * that nmi_cpu_backtrace() automatically removes the current cpu
-        * from mask.
-        */
-       if (cpumask_test_cpu(smp_processor_id(), mask) && irqs_disabled())
-               nmi_cpu_backtrace(NULL);
        smp_cross_call(mask, IPI_CPU_BACKTRACE);
  }
  
- void arch_trigger_all_cpu_backtrace(bool include_self)
+ void arch_trigger_cpumask_backtrace(const cpumask_t *mask, bool exclude_self)
  {
-       nmi_trigger_all_cpu_backtrace(include_self, raise_nmi);
+       nmi_trigger_cpumask_backtrace(mask, exclude_self, raise_nmi);
  }
diff --combined arch/arm64/kernel/process.c
index a4f5f766af08b010e66ec04889c098a5267eee05,6ac2950ffb78ef383dbf4d3ca1882e6df052f5bf..27b2f1387df40b61b4aa059be5650d329964da6b
@@@ -202,7 -202,7 +202,7 @@@ void show_regs(struct pt_regs * regs
  
  static void tls_thread_flush(void)
  {
 -      asm ("msr tpidr_el0, xzr");
 +      write_sysreg(0, tpidr_el0);
  
        if (is_compat_task()) {
                current->thread.tp_value = 0;
                 * with a stale shadow state during context switch.
                 */
                barrier();
 -              asm ("msr tpidrro_el0, xzr");
 +              write_sysreg(0, tpidrro_el0);
        }
  }
  
@@@ -253,7 -253,7 +253,7 @@@ int copy_thread(unsigned long clone_fla
                 * Read the current TLS pointer from tpidr_el0 as it may be
                 * out-of-sync with the saved value.
                 */
 -              asm("mrs %0, tpidr_el0" : "=r" (*task_user_tls(p)));
 +              *task_user_tls(p) = read_sysreg(tpidr_el0);
  
                if (stack_start) {
                        if (is_compat_thread(task_thread_info(p)))
@@@ -289,15 -289,17 +289,15 @@@ static void tls_thread_switch(struct ta
  {
        unsigned long tpidr, tpidrro;
  
 -      asm("mrs %0, tpidr_el0" : "=r" (tpidr));
 +      tpidr = read_sysreg(tpidr_el0);
        *task_user_tls(current) = tpidr;
  
        tpidr = *task_user_tls(next);
        tpidrro = is_compat_thread(task_thread_info(next)) ?
                  next->thread.tp_value : 0;
  
 -      asm(
 -      "       msr     tpidr_el0, %0\n"
 -      "       msr     tpidrro_el0, %1"
 -      : : "r" (tpidr), "r" (tpidrro));
 +      write_sysreg(tpidr, tpidr_el0);
 +      write_sysreg(tpidrro, tpidrro_el0);
  }
  
  /* Restore the UAO state depending on next's addr_limit */
@@@ -372,12 -374,8 +372,8 @@@ unsigned long arch_align_stack(unsigne
  
  unsigned long arch_randomize_brk(struct mm_struct *mm)
  {
-       unsigned long range_end = mm->brk;
        if (is_compat_task())
-               range_end += 0x02000000;
+               return randomize_page(mm->brk, 0x02000000);
        else
-               range_end += 0x40000000;
-       return randomize_range(mm->brk, range_end, 0) ? : mm->brk;
+               return randomize_page(mm->brk, 0x40000000);
  }
diff --combined arch/arm64/kernel/vmlinux.lds.S
index 5ce9b2929e0d1100dc6e5270f4ee6e336e15013e,fe7f93b7b11bde71441799d8ef90f3e2671f8a4b..1105aab1e6d6af4be3f88c0ee22d4cfafd5c5ce6
@@@ -122,6 -122,7 +122,7 @@@ SECTION
                        ENTRY_TEXT
                        TEXT_TEXT
                        SCHED_TEXT
+                       CPUIDLE_TEXT
                        LOCK_TEXT
                        KPROBES_TEXT
                        HYPERVISOR_TEXT
        _data = .;
        _sdata = .;
        RW_DATA_SECTION(L1_CACHE_BYTES, PAGE_SIZE, THREAD_SIZE)
 +
 +      /*
 +       * Data written with the MMU off but read with the MMU on requires
 +       * cache lines to be invalidated, discarding up to a Cache Writeback
 +       * Granule (CWG) of data from the cache. Keep the section that
 +       * requires this type of maintenance to be in its own Cache Writeback
 +       * Granule (CWG) area so the cache maintenance operations don't
 +       * interfere with adjacent data.
 +       */
 +      .mmuoff.data.write : ALIGN(SZ_2K) {
 +              __mmuoff_data_start = .;
 +              *(.mmuoff.data.write)
 +      }
 +      . = ALIGN(SZ_2K);
 +      .mmuoff.data.read : {
 +              *(.mmuoff.data.read)
 +              __mmuoff_data_end = .;
 +      }
 +
        PECOFF_EDATA_PADDING
        _edata = .;
  
diff --combined arch/mips/cavium-octeon/setup.c
index 89be31ba70ed5e7a9dacdaf3e107c403cbb4f071,5537f95b28c9be169ffd42a42094eaeae68ce064..9a2db1c013d92e548fd04ef0a008ec442942350c
@@@ -65,8 -65,7 +65,8 @@@ EXPORT_SYMBOL(octeon_should_swizzle_tab
  extern void pci_console_init(const char *arg);
  #endif
  
 -static unsigned long long MAX_MEMORY = 512ull << 20;
 +static unsigned long long max_memory = ULLONG_MAX;
 +static unsigned long long reserve_low_mem;
  
  DEFINE_SEMAPHORE(octeon_bootbus_sem);
  EXPORT_SYMBOL(octeon_bootbus_sem);
@@@ -76,6 -75,7 +76,6 @@@ struct octeon_boot_descriptor *octeon_b
  struct cvmx_bootinfo *octeon_bootinfo;
  EXPORT_SYMBOL(octeon_bootinfo);
  
 -static unsigned long long RESERVE_LOW_MEM = 0ull;
  #ifdef CONFIG_KEXEC
  #ifdef CONFIG_SMP
  /*
@@@ -125,18 -125,18 +125,18 @@@ static void kexec_bootmem_init(uint64_
        bootmem_desc->major_version = CVMX_BOOTMEM_DESC_MAJ_VER;
        bootmem_desc->minor_version = CVMX_BOOTMEM_DESC_MIN_VER;
  
 -      addr = (OCTEON_DDR0_BASE + RESERVE_LOW_MEM + low_reserved_bytes);
 +      addr = (OCTEON_DDR0_BASE + reserve_low_mem + low_reserved_bytes);
        bootmem_desc->head_addr = 0;
  
        if (mem_size <= OCTEON_DDR0_SIZE) {
                __cvmx_bootmem_phy_free(addr,
 -                              mem_size - RESERVE_LOW_MEM -
 +                              mem_size - reserve_low_mem -
                                low_reserved_bytes, 0);
                return;
        }
  
        __cvmx_bootmem_phy_free(addr,
 -                      OCTEON_DDR0_SIZE - RESERVE_LOW_MEM -
 +                      OCTEON_DDR0_SIZE - reserve_low_mem -
                        low_reserved_bytes, 0);
  
        mem_size -= OCTEON_DDR0_SIZE;
@@@ -267,6 -267,17 +267,17 @@@ static void octeon_crash_shutdown(struc
        default_machine_crash_shutdown(regs);
  }
  
+ #ifdef CONFIG_SMP
+ void octeon_crash_smp_send_stop(void)
+ {
+       int cpu;
+       /* disable watchdogs */
+       for_each_online_cpu(cpu)
+               cvmx_write_csr(CVMX_CIU_WDOGX(cpu_logical_map(cpu)), 0);
+ }
+ #endif
  #endif /* CONFIG_KEXEC */
  
  #ifdef CONFIG_CAVIUM_RESERVE32
@@@ -846,15 -857,15 +857,15 @@@ void __init prom_init(void
  
        /* Default to 64MB in the simulator to speed things up */
        if (octeon_is_simulation())
 -              MAX_MEMORY = 64ull << 20;
 +              max_memory = 64ull << 20;
  
        arg = strstr(arcs_cmdline, "mem=");
        if (arg) {
 -              MAX_MEMORY = memparse(arg + 4, &p);
 -              if (MAX_MEMORY == 0)
 -                      MAX_MEMORY = 32ull << 30;
 +              max_memory = memparse(arg + 4, &p);
 +              if (max_memory == 0)
 +                      max_memory = 32ull << 30;
                if (*p == '@')
 -                      RESERVE_LOW_MEM = memparse(p + 1, &p);
 +                      reserve_low_mem = memparse(p + 1, &p);
        }
  
        arcs_cmdline[0] = 0;
                        cvmx_phys_to_ptr(octeon_boot_desc_ptr->argv[i]);
                if ((strncmp(arg, "MEM=", 4) == 0) ||
                    (strncmp(arg, "mem=", 4) == 0)) {
 -                      MAX_MEMORY = memparse(arg + 4, &p);
 -                      if (MAX_MEMORY == 0)
 -                              MAX_MEMORY = 32ull << 30;
 +                      max_memory = memparse(arg + 4, &p);
 +                      if (max_memory == 0)
 +                              max_memory = 32ull << 30;
                        if (*p == '@')
 -                              RESERVE_LOW_MEM = memparse(p + 1, &p);
 +                              reserve_low_mem = memparse(p + 1, &p);
  #ifdef CONFIG_KEXEC
                } else if (strncmp(arg, "crashkernel=", 12) == 0) {
                        crashk_size = memparse(arg+12, &p);
        _machine_kexec_shutdown = octeon_shutdown;
        _machine_crash_shutdown = octeon_crash_shutdown;
        _machine_kexec_prepare = octeon_kexec_prepare;
+ #ifdef CONFIG_SMP
+       _crash_smp_send_stop = octeon_crash_smp_send_stop;
+ #endif
  #endif
  
        octeon_user_io_init();
@@@ -957,13 -971,13 +971,13 @@@ void __init plat_mem_setup(void
         * to consistently work.
         */
        mem_alloc_size = 4 << 20;
 -      if (mem_alloc_size > MAX_MEMORY)
 -              mem_alloc_size = MAX_MEMORY;
 +      if (mem_alloc_size > max_memory)
 +              mem_alloc_size = max_memory;
  
  /* Crashkernel ignores bootmem list. It relies on mem=X@Y option */
  #ifdef CONFIG_CRASH_DUMP
 -      add_memory_region(RESERVE_LOW_MEM, MAX_MEMORY, BOOT_MEM_RAM);
 -      total += MAX_MEMORY;
 +      add_memory_region(reserve_low_mem, max_memory, BOOT_MEM_RAM);
 +      total += max_memory;
  #else
  #ifdef CONFIG_KEXEC
        if (crashk_size > 0) {
         */
        cvmx_bootmem_lock();
        while ((boot_mem_map.nr_map < BOOT_MEM_MAP_MAX)
 -              && (total < MAX_MEMORY)) {
 +              && (total < max_memory)) {
                memory = cvmx_bootmem_phy_alloc(mem_alloc_size,
                                                __pa_symbol(&_end), -1,
                                                0x100000,
diff --combined arch/x86/include/asm/smp.h
index 19980b36f394b18e6816629390130fa3eb789115,f70989c640843e935df14675d27182266b556ae6..026ea82ecc60492e1ee27eae439fb2b9c6402899
@@@ -39,6 -39,9 +39,6 @@@ DECLARE_EARLY_PER_CPU_READ_MOSTLY(u16, 
  DECLARE_EARLY_PER_CPU_READ_MOSTLY(int, x86_cpu_to_logical_apicid);
  #endif
  
 -/* Static state in head.S used to set up a CPU */
 -extern unsigned long stack_start; /* Initial stack pointer address */
 -
  struct task_struct;
  
  struct smp_ops {
@@@ -47,6 -50,7 +47,7 @@@
        void (*smp_cpus_done)(unsigned max_cpus);
  
        void (*stop_other_cpus)(int wait);
+       void (*crash_stop_other_cpus)(void);
        void (*smp_send_reschedule)(int cpu);
  
        int (*cpu_up)(unsigned cpu, struct task_struct *tidle);
diff --combined arch/x86/kernel/e820.c
index 871f1863457dccdf2fdcdf310aae774ab1b3c9d2,d935983ff90eeaf49bf2962185e605ee9b2db2c4..a1b4da92921c75b053399880e5bcd0e9ce071e4e
@@@ -388,11 -388,11 +388,11 @@@ static int __init __append_e820_map(str
        while (nr_map) {
                u64 start = biosmap->addr;
                u64 size = biosmap->size;
 -              u64 end = start + size;
 +              u64 end = start + size - 1;
                u32 type = biosmap->type;
  
                /* Overflow in 64 bits? Ignore the memory map. */
 -              if (start > end)
 +              if (start > end && likely(size))
                        return -1;
  
                e820_add_region(start, size, type);
@@@ -1188,6 -1188,6 +1188,6 @@@ void __init memblock_find_dma_reserve(v
                        nr_free_pages += end_pfn - start_pfn;
        }
  
-       set_dma_reserve(nr_pages - nr_free_pages);
+       set_memory_reserve(nr_pages - nr_free_pages, false);
  #endif
  }
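
The __append_e820_map() hunk above tightens the overflow check: with end computed as start + size - 1, an entry that ends exactly at the 64-bit limit is no longer rejected, a genuinely wrapping entry still is, and likely(size) keeps a zero-sized entry from tripping the comparison. A standalone sketch of the arithmetic (illustrative userspace code with made-up values, not part of this merge):

#include <stdint.h>
#include <stdio.h>

/* Returns 1 if the (start, size) pair should be rejected, mirroring the
 * new check in __append_e820_map(): end = start + size - 1. */
static int rejects(uint64_t start, uint64_t size)
{
	uint64_t end = start + size - 1;	/* wraps on overflow, as in the kernel */

	return start > end && size != 0;
}

int main(void)
{
	/* Ends exactly at 2^64: the old end = start + size wrapped to 0 and
	 * rejected the whole map; the new check accepts it.            -> 0 */
	printf("%d\n", rejects(0xffff000000000000ULL, 0x0001000000000000ULL));

	/* Genuine overflow past 2^64 is still rejected.                -> 1 */
	printf("%d\n", rejects(0xfffffffffffff000ULL, 0x10000ULL));

	/* A zero-sized entry is not treated as an overflow.            -> 0 */
	printf("%d\n", rejects(0x100000ULL, 0));
	return 0;
}
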
diff --combined arch/x86/kernel/process.c
index c1fa790c81cd51a0cb5f5b1650a749c3b86300ee,857a686eba5eed5b2d6981be60791288733d66da..5308fb39e3040dae0075e96c5bf78428806fa55c
@@@ -32,7 -32,6 +32,7 @@@
  #include <asm/tlbflush.h>
  #include <asm/mce.h>
  #include <asm/vm86.h>
 +#include <asm/switch_to.h>
  
  /*
   * per-CPU TSS segments. Threads are completely 'soft' on Linux,
@@@ -302,7 -301,7 +302,7 @@@ void arch_cpu_idle(void
  /*
   * We use this if we don't have any better idle routine..
   */
- void default_idle(void)
+ void __cpuidle default_idle(void)
  {
        trace_cpu_idle_rcuidle(1, smp_processor_id());
        safe_halt();
@@@ -417,7 -416,7 +417,7 @@@ static int prefer_mwait_c1_over_halt(co
   * with interrupts enabled and no flags, which is backwards compatible with the
   * original MWAIT implementation.
   */
- static void mwait_idle(void)
+ static __cpuidle void mwait_idle(void)
  {
        if (!current_set_polling_and_test()) {
                trace_cpu_idle_rcuidle(1, smp_processor_id());
@@@ -509,21 -508,9 +509,20 @@@ unsigned long arch_align_stack(unsigne
  
  unsigned long arch_randomize_brk(struct mm_struct *mm)
  {
-       unsigned long range_end = mm->brk + 0x02000000;
-       return randomize_range(mm->brk, range_end, 0) ? : mm->brk;
+       return randomize_page(mm->brk, 0x02000000);
  }
  
 +/*
 + * Return saved PC of a blocked thread.
 + * What is this good for? it will be always the scheduler or ret_from_fork.
 + */
 +unsigned long thread_saved_pc(struct task_struct *tsk)
 +{
 +      struct inactive_task_frame *frame =
 +              (struct inactive_task_frame *) READ_ONCE(tsk->thread.sp);
 +      return READ_ONCE_NOCHECK(frame->ret_addr);
 +}
 +
  /*
   * Called from fs/proc with a reference on @p to find the function
   * which called into schedule(). This needs to be done carefully
@@@ -568,7 -555,7 +567,7 @@@ unsigned long get_wchan(struct task_str
        if (sp < bottom || sp > top)
                return 0;
  
 -      fp = READ_ONCE_NOCHECK(*(unsigned long *)sp);
 +      fp = READ_ONCE_NOCHECK(((struct inactive_task_frame *)sp)->bp);
        do {
                if (fp < bottom || fp > top)
                        return 0;
diff --combined arch/xtensa/kernel/vmlinux.lds.S
index 72cfe3587dd865c6532874dc56f012ccbb7cdc04,18a174c7fb8716c4525c518f458212f58b4b24f0..31411fc82662c82ec127188ccdb5fc3ac818e4e9
@@@ -30,6 -30,10 +30,6 @@@ jiffies = jiffies_64 + 4
  jiffies = jiffies_64;
  #endif
  
 -#ifndef KERNELOFFSET
 -#define KERNELOFFSET 0xd0003000
 -#endif
 -
  /* Note: In the following macros, it would be nice to specify only the
     vector name and section kind and construct "sym" and "section" using
     CPP concatenation, but that does not work reliably.  Concatenating a
@@@ -89,6 -93,9 +89,9 @@@ SECTION
      VMLINUX_SYMBOL(__sched_text_start) = .;
      *(.sched.literal .sched.text)
      VMLINUX_SYMBOL(__sched_text_end) = .;
+     VMLINUX_SYMBOL(__cpuidle_text_start) = .;
+     *(.cpuidle.literal .cpuidle.text)
+     VMLINUX_SYMBOL(__cpuidle_text_end) = .;
      VMLINUX_SYMBOL(__lock_text_start) = .;
      *(.spinlock.literal .spinlock.text)
      VMLINUX_SYMBOL(__lock_text_end) = .;
diff --combined drivers/char/random.c
index 7274ae89ddb39f3450ac9f56ef64db6384330497,d131e152c8ce6a3e70a7e0fe287275269f0c40c6..d6876d50622075f1b490bf7a19831b4e8784adb6
@@@ -479,8 -479,8 +479,8 @@@ static ssize_t _extract_entropy(struct 
  
  static void crng_reseed(struct crng_state *crng, struct entropy_store *r);
  static void push_to_pool(struct work_struct *work);
 -static __u32 input_pool_data[INPUT_POOL_WORDS];
 -static __u32 blocking_pool_data[OUTPUT_POOL_WORDS];
 +static __u32 input_pool_data[INPUT_POOL_WORDS] __latent_entropy;
 +static __u32 blocking_pool_data[OUTPUT_POOL_WORDS] __latent_entropy;
  
  static struct entropy_store input_pool = {
        .poolinfo = &poolinfo_table[0],
@@@ -2100,23 -2100,37 +2100,37 @@@ unsigned long get_random_long(void
  }
  EXPORT_SYMBOL(get_random_long);
  
- /*
-  * randomize_range() returns a start address such that
+ /**
+  * randomize_page - Generate a random, page aligned address
+  * @start:    The smallest acceptable address the caller will take.
+  * @range:    The size of the area, starting at @start, within which the
+  *            random address must fall.
+  *
+  * If @start + @range would overflow, @range is capped.
   *
-  *    [...... <range> .....]
-  *  start                  end
+  * NOTE: Historical use of randomize_range, which this replaces, presumed that
+  * @start was already page aligned.  We now align it regardless.
   *
-  * a <range> with size "len" starting at the return value is inside in the
-  * area defined by [start, end], but is otherwise randomized.
+  * Return: A page aligned address within [start, start + range).  On error,
+  * @start is returned.
   */
  unsigned long
- randomize_range(unsigned long start, unsigned long end, unsigned long len)
+ randomize_page(unsigned long start, unsigned long range)
  {
-       unsigned long range = end - len - start;
+       if (!PAGE_ALIGNED(start)) {
+               range -= PAGE_ALIGN(start) - start;
+               start = PAGE_ALIGN(start);
+       }
  
-       if (end <= start + len)
-               return 0;
-       return PAGE_ALIGN(get_random_int() % range + start);
+       if (start > ULONG_MAX - range)
+               range = ULONG_MAX - start;
+       range >>= PAGE_SHIFT;
+       if (range == 0)
+               return start;
+       return start + (get_random_long() % range << PAGE_SHIFT);
  }
  
  /* Interface for in-kernel drivers of true hardware RNGs.
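
The randomize_range() -> randomize_page() conversion above changes the calling convention: callers pass the size of the window rather than an end address, the result is already page aligned, and on error the helper returns @start itself instead of 0, so the old "?: fallback" is unnecessary. A hedged caller sketch (hypothetical function, mirroring the arm64/x86 arch_randomize_brk() hunks earlier in this merge):

#include <linux/mm_types.h>
#include <linux/random.h>

/* Illustrative only -- not part of this merge. */
static unsigned long example_randomize_brk(struct mm_struct *mm)
{
	/* Old pattern:
	 *   unsigned long range_end = mm->brk + 0x02000000;
	 *   return randomize_range(mm->brk, range_end, 0) ? : mm->brk;
	 */

	/* New pattern: randomize_page() returns a page-aligned address in
	 * [mm->brk, mm->brk + 0x02000000) and falls back to mm->brk itself
	 * on error, so no explicit fallback is needed. */
	return randomize_page(mm->brk, 0x02000000);
}
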
diff --combined fs/nfs/internal.h
index 48d1adfe25d69c6675a3c6a2a1da2930dae9f62b,da9e5584bfdc62b96378cc97bfc393402e13d53a..bed73db108c5cdc928f3b07a3c81cbfdcdcb25ee
@@@ -359,8 -359,7 +359,8 @@@ int nfs_unlink(struct inode *, struct d
  int nfs_symlink(struct inode *, struct dentry *, const char *);
  int nfs_link(struct dentry *, struct inode *, struct dentry *);
  int nfs_mknod(struct inode *, struct dentry *, umode_t, dev_t);
 -int nfs_rename(struct inode *, struct dentry *, struct inode *, struct dentry *);
 +int nfs_rename(struct inode *, struct dentry *,
 +             struct inode *, struct dentry *, unsigned int);
  
  /* file.c */
  int nfs_file_fsync(struct file *file, loff_t start, loff_t end, int datasync);
@@@ -682,11 -681,11 +682,11 @@@ unsigned int nfs_page_length(struct pag
        loff_t i_size = i_size_read(page_file_mapping(page)->host);
  
        if (i_size > 0) {
-               pgoff_t page_index = page_file_index(page);
+               pgoff_t index = page_index(page);
                pgoff_t end_index = (i_size - 1) >> PAGE_SHIFT;
-               if (page_index < end_index)
+               if (index < end_index)
                        return PAGE_SIZE;
-               if (page_index == end_index)
+               if (index == end_index)
                        return ((i_size - 1) & ~PAGE_MASK) + 1;
        }
        return 0;
diff --combined fs/proc/base.c
index 3b792ab3c0dc17d47b42643ec9a9807f663a229c,5ef2ae81f623387aaaa5b68f8b6ecce26e26542b..dc7fe5f3a53c5e97a2b1df4b89906c12ca364434
@@@ -483,7 -483,7 +483,7 @@@ static int proc_pid_stack(struct seq_fi
                save_stack_trace_tsk(task, &trace);
  
                for (i = 0; i < trace.nr_entries; i++) {
 -                      seq_printf(m, "[<%pK>] %pS\n",
 +                      seq_printf(m, "[<%pK>] %pB\n",
                                   (void *)entries[i], (void *)entries[i]);
                }
                unlock_trace(task);
@@@ -2280,16 -2280,27 +2280,27 @@@ static ssize_t timerslack_ns_write(stru
        if (!p)
                return -ESRCH;
  
-       if (ptrace_may_access(p, PTRACE_MODE_ATTACH_FSCREDS)) {
-               task_lock(p);
-               if (slack_ns == 0)
-                       p->timer_slack_ns = p->default_timer_slack_ns;
-               else
-                       p->timer_slack_ns = slack_ns;
-               task_unlock(p);
-       } else
-               count = -EPERM;
+       if (p != current) {
+               if (!capable(CAP_SYS_NICE)) {
+                       count = -EPERM;
+                       goto out;
+               }
+               err = security_task_setscheduler(p);
+               if (err) {
+                       count = err;
+                       goto out;
+               }
+       }
+       task_lock(p);
+       if (slack_ns == 0)
+               p->timer_slack_ns = p->default_timer_slack_ns;
+       else
+               p->timer_slack_ns = slack_ns;
+       task_unlock(p);
  
+ out:
        put_task_struct(p);
  
        return count;
@@@ -2299,19 -2310,28 +2310,28 @@@ static int timerslack_ns_show(struct se
  {
        struct inode *inode = m->private;
        struct task_struct *p;
-       int err =  0;
+       int err = 0;
  
        p = get_proc_task(inode);
        if (!p)
                return -ESRCH;
  
-       if (ptrace_may_access(p, PTRACE_MODE_ATTACH_FSCREDS)) {
-               task_lock(p);
-               seq_printf(m, "%llu\n", p->timer_slack_ns);
-               task_unlock(p);
-       } else
-               err = -EPERM;
+       if (p != current) {
+               if (!capable(CAP_SYS_NICE)) {
+                       err = -EPERM;
+                       goto out;
+               }
+               err = security_task_getscheduler(p);
+               if (err)
+                       goto out;
+       }
  
+       task_lock(p);
+       seq_printf(m, "%llu\n", p->timer_slack_ns);
+       task_unlock(p);
+ out:
        put_task_struct(p);
  
        return err;
diff --combined fs/proc/task_mmu.c
index f6fa99eca5158f36d3fe38a23baac5b7c54ca6bb,1026b786289632ac7696947ec2583efacfbfef8a..d2a70cf2154e68958b57c3e884b7614522a925c4
@@@ -581,8 -581,6 +581,8 @@@ static void smaps_pmd_entry(pmd_t *pmd
                mss->anonymous_thp += HPAGE_PMD_SIZE;
        else if (PageSwapBacked(page))
                mss->shmem_thp += HPAGE_PMD_SIZE;
 +      else if (is_zone_device_page(page))
 +              /* pass */;
        else
                VM_BUG_ON_PAGE(1, page);
        smaps_account(mss, page, true, pmd_young(*pmd), pmd_dirty(*pmd));
@@@ -1070,7 -1068,7 +1070,7 @@@ static ssize_t clear_refs_write(struct 
                        }
                        mmu_notifier_invalidate_range_start(mm, 0, -1);
                }
-               walk_page_range(0, ~0UL, &clear_refs_walk);
+               walk_page_range(0, mm->highest_vm_end, &clear_refs_walk);
                if (type == CLEAR_REFS_SOFT_DIRTY)
                        mmu_notifier_invalidate_range_end(mm, 0, -1);
                flush_tlb_mm(mm);
diff --combined include/asm-generic/vmlinux.lds.h
index ad9d8f94dc7a949cf9309d61afe4cd5f2d17fcf9,3e42bcdd014b45b66b95853ee699937666b11b3f..c842c10d735e69c700a3b9892b795444bd0b711e
        *(.dtb.init.rodata)                                             \
        VMLINUX_SYMBOL(__dtb_end) = .;
  
 -/* .data section */
 +/*
 + * .data section
 + * -fdata-sections generates .data.identifier which needs to be pulled in
 + * with .data, but don't want to pull in .data..stuff which has its own
 + * requirements. Same for bss.
 + */
  #define DATA_DATA                                                     \
 -      *(.data)                                                        \
 +      *(.data .data.[0-9a-zA-Z_]*)                                    \
        *(.ref.data)                                                    \
        *(.data..shared_aligned) /* percpu related */                   \
        MEM_KEEP(init.data)                                             \
        /* Kernel symbol table: Normal symbols */                       \
        __ksymtab         : AT(ADDR(__ksymtab) - LOAD_OFFSET) {         \
                VMLINUX_SYMBOL(__start___ksymtab) = .;                  \
 -              *(SORT(___ksymtab+*))                                   \
 +              KEEP(*(SORT(___ksymtab+*)))                             \
                VMLINUX_SYMBOL(__stop___ksymtab) = .;                   \
        }                                                               \
                                                                        \
        /* Kernel symbol table: GPL-only symbols */                     \
        __ksymtab_gpl     : AT(ADDR(__ksymtab_gpl) - LOAD_OFFSET) {     \
                VMLINUX_SYMBOL(__start___ksymtab_gpl) = .;              \
 -              *(SORT(___ksymtab_gpl+*))                               \
 +              KEEP(*(SORT(___ksymtab_gpl+*)))                         \
                VMLINUX_SYMBOL(__stop___ksymtab_gpl) = .;               \
        }                                                               \
                                                                        \
        /* Kernel symbol table: Normal unused symbols */                \
        __ksymtab_unused  : AT(ADDR(__ksymtab_unused) - LOAD_OFFSET) {  \
                VMLINUX_SYMBOL(__start___ksymtab_unused) = .;           \
 -              *(SORT(___ksymtab_unused+*))                            \
 +              KEEP(*(SORT(___ksymtab_unused+*)))                      \
                VMLINUX_SYMBOL(__stop___ksymtab_unused) = .;            \
        }                                                               \
                                                                        \
        /* Kernel symbol table: GPL-only unused symbols */              \
        __ksymtab_unused_gpl : AT(ADDR(__ksymtab_unused_gpl) - LOAD_OFFSET) { \
                VMLINUX_SYMBOL(__start___ksymtab_unused_gpl) = .;       \
 -              *(SORT(___ksymtab_unused_gpl+*))                        \
 +              KEEP(*(SORT(___ksymtab_unused_gpl+*)))                  \
                VMLINUX_SYMBOL(__stop___ksymtab_unused_gpl) = .;        \
        }                                                               \
                                                                        \
        /* Kernel symbol table: GPL-future-only symbols */              \
        __ksymtab_gpl_future : AT(ADDR(__ksymtab_gpl_future) - LOAD_OFFSET) { \
                VMLINUX_SYMBOL(__start___ksymtab_gpl_future) = .;       \
 -              *(SORT(___ksymtab_gpl_future+*))                        \
 +              KEEP(*(SORT(___ksymtab_gpl_future+*)))                  \
                VMLINUX_SYMBOL(__stop___ksymtab_gpl_future) = .;        \
        }                                                               \
                                                                        \
        /* Kernel symbol table: Normal symbols */                       \
        __kcrctab         : AT(ADDR(__kcrctab) - LOAD_OFFSET) {         \
                VMLINUX_SYMBOL(__start___kcrctab) = .;                  \
 -              *(SORT(___kcrctab+*))                                   \
 +              KEEP(*(SORT(___kcrctab+*)))                             \
                VMLINUX_SYMBOL(__stop___kcrctab) = .;                   \
        }                                                               \
                                                                        \
        /* Kernel symbol table: GPL-only symbols */                     \
        __kcrctab_gpl     : AT(ADDR(__kcrctab_gpl) - LOAD_OFFSET) {     \
                VMLINUX_SYMBOL(__start___kcrctab_gpl) = .;              \
 -              *(SORT(___kcrctab_gpl+*))                               \
 +              KEEP(*(SORT(___kcrctab_gpl+*)))                         \
                VMLINUX_SYMBOL(__stop___kcrctab_gpl) = .;               \
        }                                                               \
                                                                        \
        /* Kernel symbol table: Normal unused symbols */                \
        __kcrctab_unused  : AT(ADDR(__kcrctab_unused) - LOAD_OFFSET) {  \
                VMLINUX_SYMBOL(__start___kcrctab_unused) = .;           \
 -              *(SORT(___kcrctab_unused+*))                            \
 +              KEEP(*(SORT(___kcrctab_unused+*)))                      \
                VMLINUX_SYMBOL(__stop___kcrctab_unused) = .;            \
        }                                                               \
                                                                        \
        /* Kernel symbol table: GPL-only unused symbols */              \
        __kcrctab_unused_gpl : AT(ADDR(__kcrctab_unused_gpl) - LOAD_OFFSET) { \
                VMLINUX_SYMBOL(__start___kcrctab_unused_gpl) = .;       \
 -              *(SORT(___kcrctab_unused_gpl+*))                        \
 +              KEEP(*(SORT(___kcrctab_unused_gpl+*)))                  \
                VMLINUX_SYMBOL(__stop___kcrctab_unused_gpl) = .;        \
        }                                                               \
                                                                        \
        /* Kernel symbol table: GPL-future-only symbols */              \
        __kcrctab_gpl_future : AT(ADDR(__kcrctab_gpl_future) - LOAD_OFFSET) { \
                VMLINUX_SYMBOL(__start___kcrctab_gpl_future) = .;       \
 -              *(SORT(___kcrctab_gpl_future+*))                        \
 +              KEEP(*(SORT(___kcrctab_gpl_future+*)))                  \
                VMLINUX_SYMBOL(__stop___kcrctab_gpl_future) = .;        \
        }                                                               \
                                                                        \
        /* Kernel symbol table: strings */                              \
          __ksymtab_strings : AT(ADDR(__ksymtab_strings) - LOAD_OFFSET) {       \
 -              *(__ksymtab_strings)                                    \
 +              KEEP(*(__ksymtab_strings))                              \
        }                                                               \
                                                                        \
        /* __*init sections */                                          \
  #define SECURITY_INIT                                                 \
        .security_initcall.init : AT(ADDR(.security_initcall.init) - LOAD_OFFSET) { \
                VMLINUX_SYMBOL(__security_initcall_start) = .;          \
 -              *(.security_initcall.init)                              \
 +              KEEP(*(.security_initcall.init))                        \
                VMLINUX_SYMBOL(__security_initcall_end) = .;            \
        }
  
   * during second ld run in second ld pass when generating System.map */
  #define TEXT_TEXT                                                     \
                ALIGN_FUNCTION();                                       \
 -              *(.text.hot .text .text.fixup .text.unlikely)           \
 +              *(.text.hot .text .text.fixup .text.unlikely .text.*)   \
                *(.ref.text)                                            \
        MEM_KEEP(init.text)                                             \
        MEM_KEEP(exit.text)                                             \
                *(.spinlock.text)                                       \
                VMLINUX_SYMBOL(__lock_text_end) = .;
  
+ #define CPUIDLE_TEXT                                                  \
+               ALIGN_FUNCTION();                                       \
+               VMLINUX_SYMBOL(__cpuidle_text_start) = .;               \
+               *(.cpuidle.text)                                        \
+               VMLINUX_SYMBOL(__cpuidle_text_end) = .;
  #define KPROBES_TEXT                                                  \
                ALIGN_FUNCTION();                                       \
                VMLINUX_SYMBOL(__kprobes_text_start) = .;               \
  
  /* init and exit section handling */
  #define INIT_DATA                                                     \
 +      KEEP(*(SORT(___kentry+*)))                                      \
        *(.init.data)                                                   \
        MEM_DISCARD(init.data)                                          \
        KERNEL_CTORS()                                                  \
                BSS_FIRST_SECTIONS                                      \
                *(.bss..page_aligned)                                   \
                *(.dynbss)                                              \
 -              *(.bss)                                                 \
 +              *(.bss .bss.[0-9a-zA-Z_]*)                              \
                *(COMMON)                                               \
        }
  
  
  #define INIT_CALLS_LEVEL(level)                                               \
                VMLINUX_SYMBOL(__initcall##level##_start) = .;          \
 -              *(.initcall##level##.init)                              \
 -              *(.initcall##level##s.init)                             \
 +              KEEP(*(.initcall##level##.init))                        \
 +              KEEP(*(.initcall##level##s.init))                       \
  
  #define INIT_CALLS                                                    \
                VMLINUX_SYMBOL(__initcall_start) = .;                   \
 -              *(.initcallearly.init)                                  \
 +              KEEP(*(.initcallearly.init))                            \
                INIT_CALLS_LEVEL(0)                                     \
                INIT_CALLS_LEVEL(1)                                     \
                INIT_CALLS_LEVEL(2)                                     \
  
  #define CON_INITCALL                                                  \
                VMLINUX_SYMBOL(__con_initcall_start) = .;               \
 -              *(.con_initcall.init)                                   \
 +              KEEP(*(.con_initcall.init))                             \
                VMLINUX_SYMBOL(__con_initcall_end) = .;
  
  #define SECURITY_INITCALL                                             \
                VMLINUX_SYMBOL(__security_initcall_start) = .;          \
 -              *(.security_initcall.init)                              \
 +              KEEP(*(.security_initcall.init))                        \
                VMLINUX_SYMBOL(__security_initcall_end) = .;
  
  #ifdef CONFIG_BLK_DEV_INITRD
  #define INIT_RAM_FS                                                   \
        . = ALIGN(4);                                                   \
        VMLINUX_SYMBOL(__initramfs_start) = .;                          \
 -      *(.init.ramfs)                                                  \
 +      KEEP(*(.init.ramfs))                                            \
        . = ALIGN(8);                                                   \
 -      *(.init.ramfs.info)
 +      KEEP(*(.init.ramfs.info))
  #else
  #define INIT_RAM_FS
  #endif
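
The KEEP() wrappers added above matter once the kernel is linked with ld --gc-sections: the initcall, ksymtab and initramfs tables are never referenced from C code, only bracketed by linker-defined start/stop symbols, so without KEEP() section garbage collection would silently drop them. A minimal sketch of how such a table entry is produced on the C side (modelled loosely on the kernel's __define_initcall(); the my_-prefixed names are placeholders, not kernel API):

	/* An initcall entry is just a function pointer emitted into a named
	 * section.  Nothing reads the variable directly, so the section has
	 * to be KEEP()-ed in the linker script when --gc-sections is used. */
	typedef int (*initcall_t)(void);

	#define my_define_initcall(fn, id)					\
		static initcall_t __initcall_##fn##id				\
		__attribute__((used, section(".initcall" #id ".init"))) = fn

	static int my_driver_init(void)
	{
		return 0;
	}
	my_define_initcall(my_driver_init, 6);	/* ends up in .initcall6.init */
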
diff --combined include/linux/cpu.h
index 7572d9e9dced921e1732226a2c00f5802f58c735,6babfa6db9d99387ef5ec01a58ca3118a5a8975a..b886dc17f2f3457db43a2523aacb35e74e49e75b
@@@ -61,8 -61,17 +61,8 @@@ struct notifier_block
  #define CPU_DOWN_PREPARE      0x0005 /* CPU (unsigned)v going down */
  #define CPU_DOWN_FAILED               0x0006 /* CPU (unsigned)v NOT going down */
  #define CPU_DEAD              0x0007 /* CPU (unsigned)v dead */
 -#define CPU_DYING             0x0008 /* CPU (unsigned)v not running any task,
 -                                      * not handling interrupts, soon dead.
 -                                      * Called on the dying cpu, interrupts
 -                                      * are already disabled. Must not
 -                                      * sleep, must not fail */
  #define CPU_POST_DEAD         0x0009 /* CPU (unsigned)v dead, cpu_hotplug
                                        * lock is dropped */
 -#define CPU_STARTING          0x000A /* CPU (unsigned)v soon running.
 -                                      * Called on the new cpu, just before
 -                                      * enabling interrupts. Must not sleep,
 -                                      * must not fail */
  #define CPU_BROKEN            0x000B /* CPU (unsigned)v did not die properly,
                                        * perhaps due to preemption. */
  
@@@ -77,6 -86,9 +77,6 @@@
  #define CPU_DOWN_PREPARE_FROZEN       (CPU_DOWN_PREPARE | CPU_TASKS_FROZEN)
  #define CPU_DOWN_FAILED_FROZEN        (CPU_DOWN_FAILED | CPU_TASKS_FROZEN)
  #define CPU_DEAD_FROZEN               (CPU_DEAD | CPU_TASKS_FROZEN)
 -#define CPU_DYING_FROZEN      (CPU_DYING | CPU_TASKS_FROZEN)
 -#define CPU_STARTING_FROZEN   (CPU_STARTING | CPU_TASKS_FROZEN)
 -
  
  #ifdef CONFIG_SMP
  extern bool cpuhp_tasks_frozen;
@@@ -216,11 -228,7 +216,11 @@@ static inline void cpu_hotplug_done(voi
  #endif                /* CONFIG_HOTPLUG_CPU */
  
  #ifdef CONFIG_PM_SLEEP_SMP
 -extern int disable_nonboot_cpus(void);
 +extern int freeze_secondary_cpus(int primary);
 +static inline int disable_nonboot_cpus(void)
 +{
 +      return freeze_secondary_cpus(0);
 +}
  extern void enable_nonboot_cpus(void);
  #else /* !CONFIG_PM_SLEEP_SMP */
  static inline int disable_nonboot_cpus(void) { return 0; }
@@@ -231,6 -239,11 +231,11 @@@ void cpu_startup_entry(enum cpuhp_stat
  
  void cpu_idle_poll_ctrl(bool enable);
  
+ /* Attach to any functions which should be considered cpuidle. */
+ #define __cpuidle     __attribute__((__section__(".cpuidle.text")))
+ bool cpu_in_idle(unsigned long pc);
  void arch_cpu_idle(void);
  void arch_cpu_idle_prepare(void);
  void arch_cpu_idle_enter(void);
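
The __cpuidle annotation introduced here places a function's code into .cpuidle.text, which the CPUIDLE_TEXT linker macro above brackets with __cpuidle_text_start/__cpuidle_text_end; cpu_in_idle(pc) then only has to range-check a sampled program counter to decide whether that CPU was sitting in its idle loop. A hedged sketch of an architecture-side user (the function name and the wfi instruction are illustrative, not taken from this diff):

	/* Sketch: tag the arch idle routine so cpu_in_idle() can recognise it. */
	static void __cpuidle example_arch_cpu_idle(void)
	{
		asm volatile("wfi");	/* illustrative wait-for-interrupt */
		raw_local_irq_enable();	/* idle hooks return with IRQs enabled */
	}

	/* A diagnostic path (e.g. an NMI backtrace) might then skip idling CPUs:
	 *	if (cpu_in_idle(instruction_pointer(regs)))
	 *		return;
	 */
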
diff --combined include/linux/random.h
index d80a4388a4fd793b0113675c1f8b42cb86b5506e,f7bb7a355cf71381100730a2379ba206bc3258ea..7bd2403e4fef1ad7fb0a5f03b4e104e96234d26b
@@@ -18,20 -18,9 +18,20 @@@ struct random_ready_callback 
  };
  
  extern void add_device_randomness(const void *, unsigned int);
 +
 +#if defined(CONFIG_GCC_PLUGIN_LATENT_ENTROPY) && !defined(__CHECKER__)
 +static inline void add_latent_entropy(void)
 +{
 +      add_device_randomness((const void *)&latent_entropy,
 +                            sizeof(latent_entropy));
 +}
 +#else
 +static inline void add_latent_entropy(void) {}
 +#endif
 +
  extern void add_input_randomness(unsigned int type, unsigned int code,
 -                               unsigned int value);
 -extern void add_interrupt_randomness(int irq, int irq_flags);
 +                               unsigned int value) __latent_entropy;
 +extern void add_interrupt_randomness(int irq, int irq_flags) __latent_entropy;
  
  extern void get_random_bytes(void *buf, int nbytes);
  extern int add_random_ready_callback(struct random_ready_callback *rdy);
@@@ -45,7 -34,7 +45,7 @@@ extern const struct file_operations ran
  
  unsigned int get_random_int(void);
  unsigned long get_random_long(void);
- unsigned long randomize_range(unsigned long start, unsigned long end, unsigned long len);
+ unsigned long randomize_page(unsigned long start, unsigned long range);
  
  u32 prandom_u32(void);
  void prandom_bytes(void *buf, size_t nbytes);
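
Two related changes land in this header: the __latent_entropy/add_latent_entropy() helpers that back the gcc latent_entropy plugin, and the replacement of randomize_range() with randomize_page(). The new helper takes a start address and a window size and returns a page-aligned address inside [start, start + range), rather than taking an explicit end and length. A hedged sketch of a caller being converted (the 32 MiB window is an arbitrary example value):

	/* Sketch: choosing a randomised, page-aligned base inside a window. */
	static unsigned long pick_randomized_base(unsigned long base)
	{
		/* previously roughly: randomize_range(base, base + SZ_32M, 0) */
		return randomize_page(base, SZ_32M);
	}
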
diff --combined include/linux/relay.h
index ecbb34a382b898cabf40d04237fbfae5534c5bc0,61355f1cc1bf4ff294941229ca6895fc5e4045ef..ab0c59e947104e053996fb4141306f79744e92b6
@@@ -19,7 -19,7 +19,8 @@@
  #include <linux/fs.h>
  #include <linux/poll.h>
  #include <linux/kref.h>
 +#include <linux/percpu.h>
+ #include <linux/irq_work.h>
  
  /*
   * Tracks changes to rchan/rchan_buf structs
@@@ -38,7 -38,7 +39,7 @@@ struct rchan_bu
        size_t subbufs_consumed;        /* count of sub-buffers consumed */
        struct rchan *chan;             /* associated channel */
        wait_queue_head_t read_wait;    /* reader wait queue */
-       struct timer_list timer;        /* reader wake-up timer */
+       struct irq_work wakeup_work;    /* reader wakeup */
        struct dentry *dentry;          /* channel file dentry */
        struct kref kref;               /* channel buffer refcount */
        struct page **page_array;       /* array of current buffer pages */
@@@ -64,7 -64,7 +65,7 @@@ struct rcha
        struct kref kref;               /* channel refcount */
        void *private_data;             /* for user-defined data */
        size_t last_toobig;             /* tried to log event > subbuf size */
 -      struct rchan_buf *buf[NR_CPUS]; /* per-cpu channel buffers */
 +      struct rchan_buf ** __percpu buf; /* per-cpu channel buffers */
        int is_global;                  /* One global buffer ? */
        struct list_head list;          /* for channel list */
        struct dentry *parent;          /* parent dentry passed to open */
@@@ -205,7 -205,7 +206,7 @@@ static inline void relay_write(struct r
        struct rchan_buf *buf;
  
        local_irq_save(flags);
 -      buf = chan->buf[smp_processor_id()];
 +      buf = *this_cpu_ptr(chan->buf);
        if (unlikely(buf->offset + length > chan->subbuf_size))
                length = relay_switch_subbuf(buf, length);
        memcpy(buf->data + buf->offset, data, length);
@@@ -231,12 -231,12 +232,12 @@@ static inline void __relay_write(struc
  {
        struct rchan_buf *buf;
  
 -      buf = chan->buf[get_cpu()];
 +      buf = *get_cpu_ptr(chan->buf);
        if (unlikely(buf->offset + length > buf->chan->subbuf_size))
                length = relay_switch_subbuf(buf, length);
        memcpy(buf->data + buf->offset, data, length);
        buf->offset += length;
 -      put_cpu();
 +      put_cpu_ptr(chan->buf);
  }
  
  /**
   */
  static inline void *relay_reserve(struct rchan *chan, size_t length)
  {
 -      void *reserved;
 -      struct rchan_buf *buf = chan->buf[smp_processor_id()];
 +      void *reserved = NULL;
 +      struct rchan_buf *buf = *get_cpu_ptr(chan->buf);
  
        if (unlikely(buf->offset + length > buf->chan->subbuf_size)) {
                length = relay_switch_subbuf(buf, length);
                if (!length)
 -                      return NULL;
 +                      goto end;
        }
        reserved = buf->data + buf->offset;
        buf->offset += length;
  
 +end:
 +      put_cpu_ptr(chan->buf);
        return reserved;
  }
  
@@@ -288,11 -286,5 +289,11 @@@ static inline void subbuf_start_reserve
   */
  extern const struct file_operations relay_file_operations;
  
 +#ifdef CONFIG_RELAY
 +int relay_prepare_cpu(unsigned int cpu);
 +#else
 +#define relay_prepare_cpu     NULL
 +#endif
 +
  #endif /* _LINUX_RELAY_H */
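
relay_prepare_cpu() is exposed here so the CPU hotplug state machine can create a channel buffer for a CPU before it comes online, replacing the notifier that kernel/relay.c drops further down. A minimal sketch of hooking it up (the mainline conversion is expected to wire this into the static hotplug state table instead, and the CPUHP_RELAY_PREPARE state and name string are assumptions):

	/* Sketch: register the relay prepare callback with the hotplug core. */
	static int __init example_relay_hotplug_init(void)
	{
		return cpuhp_setup_state_nocalls(CPUHP_RELAY_PREPARE,
						 "relay:prepare",
						 relay_prepare_cpu, NULL);
	}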
  
diff --combined include/linux/sched.h
index 06bd6ab542313770f01251b1af0400086f51c97f,0e974b51a3f2736f3ac0b39335e9e3206fa6993f..af39baf764ddf68a6aa833c05ad278ad61f28ab9
@@@ -522,8 -522,9 +522,9 @@@ static inline int get_dumpable(struct m
  
  #define MMF_HAS_UPROBES               19      /* has uprobes */
  #define MMF_RECALC_UPROBES    20      /* MMF_HAS_UPROBES can be wrong */
- #define MMF_OOM_REAPED                21      /* mm has been already reaped */
- #define MMF_OOM_NOT_REAPABLE  22      /* mm couldn't be reaped */
+ #define MMF_OOM_SKIP          21      /* mm is of no interest for the OOM killer */
+ #define MMF_UNSTABLE          22      /* mm is unstable for copy_from_user */
+ #define MMF_HUGE_ZERO_PAGE    23      /* mm has ever used the global huge zero page */
  
  #define MMF_INIT_MASK         (MMF_DUMPABLE_MASK | MMF_DUMP_FILTER_MASK)
  
@@@ -671,7 -672,6 +672,6 @@@ struct signal_struct 
        atomic_t                sigcnt;
        atomic_t                live;
        int                     nr_threads;
-       atomic_t oom_victims; /* # of TIF_MEDIE threads in this thread group */
        struct list_head        thread_head;
  
        wait_queue_head_t       wait_chldexit;  /* for wait4() */
        short oom_score_adj;            /* OOM kill score adjustment */
        short oom_score_adj_min;        /* OOM kill score adjustment min value.
                                         * Only settable by CAP_SYS_RESOURCE. */
+       struct mm_struct *oom_mm;       /* recorded mm when the thread group got
+                                        * killed by the oom killer */
  
        struct mutex cred_guard_mutex;  /* guard against foreign influences on
                                         * credential calculations
@@@ -1022,8 -1024,7 +1024,8 @@@ extern void wake_up_q(struct wake_q_hea
  #define SD_BALANCE_FORK               0x0008  /* Balance on fork, clone */
  #define SD_BALANCE_WAKE               0x0010  /* Balance on wakeup */
  #define SD_WAKE_AFFINE                0x0020  /* Wake task to waking CPU */
 -#define SD_SHARE_CPUCAPACITY  0x0080  /* Domain members share cpu power */
 +#define SD_ASYM_CPUCAPACITY   0x0040  /* Groups have different max cpu capacities */
 +#define SD_SHARE_CPUCAPACITY  0x0080  /* Domain members share cpu capacity */
  #define SD_SHARE_POWERDOMAIN  0x0100  /* Domain members share power domain */
  #define SD_SHARE_PKG_RESOURCES        0x0200  /* Domain members share cpu pkg resources */
  #define SD_SERIALIZE          0x0400  /* Only a single load balancing instance */
@@@ -1924,9 -1925,6 +1926,9 @@@ struct task_struct 
  #ifdef CONFIG_MMU
        struct task_struct *oom_reaper_list;
  #endif
 +#ifdef CONFIG_VMAP_STACK
 +      struct vm_struct *stack_vm_area;
 +#endif
  /* CPU-specific state of this task */
        struct thread_struct thread;
  /*
@@@ -1943,18 -1941,6 +1945,18 @@@ extern int arch_task_struct_size __read
  # define arch_task_struct_size (sizeof(struct task_struct))
  #endif
  
 +#ifdef CONFIG_VMAP_STACK
 +static inline struct vm_struct *task_stack_vm_area(const struct task_struct *t)
 +{
 +      return t->stack_vm_area;
 +}
 +#else
 +static inline struct vm_struct *task_stack_vm_area(const struct task_struct *t)
 +{
 +      return NULL;
 +}
 +#endif
 +
  /* Future-safe accessor for struct task_struct's cpus_allowed. */
  #define tsk_cpus_allowed(tsk) (&(tsk)->cpus_allowed)
  
@@@ -2848,6 -2834,20 +2850,20 @@@ static inline void mmdrop(struct mm_str
                __mmdrop(mm);
  }
  
+ static inline void mmdrop_async_fn(struct work_struct *work)
+ {
+       struct mm_struct *mm = container_of(work, struct mm_struct, async_put_work);
+       __mmdrop(mm);
+ }
+ static inline void mmdrop_async(struct mm_struct *mm)
+ {
+       if (unlikely(atomic_dec_and_test(&mm->mm_count))) {
+               INIT_WORK(&mm->async_put_work, mmdrop_async_fn);
+               schedule_work(&mm->async_put_work);
+       }
+ }
  static inline bool mmget_not_zero(struct mm_struct *mm)
  {
        return atomic_inc_not_zero(&mm->mm_users);
@@@ -3252,15 -3252,6 +3268,15 @@@ static inline void cond_resched_rcu(voi
  #endif
  }
  
 +static inline unsigned long get_preempt_disable_ip(struct task_struct *p)
 +{
 +#ifdef CONFIG_DEBUG_PREEMPT
 +      return p->preempt_disable_ip;
 +#else
 +      return 0;
 +#endif
 +}
 +
  /*
   * Does a critical section need to be broken due to another
   * task waiting?: (technically does not depend on CONFIG_PREEMPT,
@@@ -3494,19 -3485,15 +3510,19 @@@ static inline unsigned long rlimit_max(
        return task_rlimit_max(current, limit);
  }
  
 +#define SCHED_CPUFREQ_RT      (1U << 0)
 +#define SCHED_CPUFREQ_DL      (1U << 1)
 +
 +#define SCHED_CPUFREQ_RT_DL   (SCHED_CPUFREQ_RT | SCHED_CPUFREQ_DL)
 +
  #ifdef CONFIG_CPU_FREQ
  struct update_util_data {
 -      void (*func)(struct update_util_data *data,
 -                   u64 time, unsigned long util, unsigned long max);
 +       void (*func)(struct update_util_data *data, u64 time, unsigned int flags);
  };
  
  void cpufreq_add_update_util_hook(int cpu, struct update_util_data *data,
 -                      void (*func)(struct update_util_data *data, u64 time,
 -                                   unsigned long util, unsigned long max));
 +                       void (*func)(struct update_util_data *data, u64 time,
 +                                  unsigned int flags));
  void cpufreq_remove_update_util_hook(int cpu);
  #endif /* CONFIG_CPU_FREQ */
  
diff --combined init/main.c
index 2858be732f6d25dd8431cd994645c8c6af3828c2,47bdcc39266dcf71a929de5f84049d2e80d05720..20dacff539072856f7ea700505d36393abb75bc2
@@@ -59,6 -59,7 +59,7 @@@
  #include <linux/pid_namespace.h>
  #include <linux/device.h>
  #include <linux/kthread.h>
+ #include <linux/pagemap.h>
  #include <linux/sched.h>
  #include <linux/signal.h>
  #include <linux/idr.h>
@@@ -463,6 -464,9 +464,9 @@@ void __init __weak thread_stack_cache_i
   */
  static void __init mm_init(void)
  {
+       /* Does address_space.flags still fit into a 32-bit ulong? */
+       BUILD_BUG_ON(AS_LAST_FLAG > 32);
        /*
         * page_ext requires contiguous pages,
         * bigger than MAX_ORDER unless SPARSEMEM.
@@@ -789,7 -793,6 +793,7 @@@ int __init_or_module do_one_initcall(in
        }
        WARN(msgbuf[0], "initcall %pF returned with %s\n", fn, msgbuf);
  
 +      add_latent_entropy();
        return ret;
  }
  
diff --combined kernel/fork.c
index c2ecca44406b130e0ec7abc4d2648beff6738814,690a1aad95e4fa11bbf6b2605bfb7d94bbb666a7..3584f521e3a63d82b6f1827f5f856fb0832d8077
@@@ -158,39 -158,19 +158,39 @@@ void __weak arch_release_thread_stack(u
   * Allocate pages if THREAD_SIZE is >= PAGE_SIZE, otherwise use a
   * kmemcache based allocator.
   */
 -# if THREAD_SIZE >= PAGE_SIZE
 -static unsigned long *alloc_thread_stack_node(struct task_struct *tsk,
 -                                                int node)
 +# if THREAD_SIZE >= PAGE_SIZE || defined(CONFIG_VMAP_STACK)
 +static unsigned long *alloc_thread_stack_node(struct task_struct *tsk, int node)
  {
 +#ifdef CONFIG_VMAP_STACK
 +      void *stack = __vmalloc_node_range(THREAD_SIZE, THREAD_SIZE,
 +                                         VMALLOC_START, VMALLOC_END,
 +                                         THREADINFO_GFP | __GFP_HIGHMEM,
 +                                         PAGE_KERNEL,
 +                                         0, node,
 +                                         __builtin_return_address(0));
 +
 +      /*
 +       * We can't call find_vm_area() in interrupt context, and
 +       * free_thread_stack() can be called in interrupt context,
 +       * so cache the vm_struct.
 +       */
 +      if (stack)
 +              tsk->stack_vm_area = find_vm_area(stack);
 +      return stack;
 +#else
        struct page *page = alloc_pages_node(node, THREADINFO_GFP,
                                             THREAD_SIZE_ORDER);
  
        return page ? page_address(page) : NULL;
 +#endif
  }
  
 -static inline void free_thread_stack(unsigned long *stack)
 +static inline void free_thread_stack(struct task_struct *tsk)
  {
 -      __free_pages(virt_to_page(stack), THREAD_SIZE_ORDER);
 +      if (task_stack_vm_area(tsk))
 +              vfree(tsk->stack);
 +      else
 +              __free_pages(virt_to_page(tsk->stack), THREAD_SIZE_ORDER);
  }
  # else
  static struct kmem_cache *thread_stack_cache;
@@@ -201,9 -181,9 +201,9 @@@ static unsigned long *alloc_thread_stac
        return kmem_cache_alloc_node(thread_stack_cache, THREADINFO_GFP, node);
  }
  
 -static void free_thread_stack(unsigned long *stack)
 +static void free_thread_stack(struct task_struct *tsk)
  {
 -      kmem_cache_free(thread_stack_cache, stack);
 +      kmem_cache_free(thread_stack_cache, tsk->stack);
  }
  
  void thread_stack_cache_init(void)
@@@ -233,47 -213,24 +233,47 @@@ struct kmem_cache *vm_area_cachep
  /* SLAB cache for mm_struct structures (tsk->mm) */
  static struct kmem_cache *mm_cachep;
  
 -static void account_kernel_stack(unsigned long *stack, int account)
 +static void account_kernel_stack(struct task_struct *tsk, int account)
  {
 -      /* All stack pages are in the same zone and belong to the same memcg. */
 -      struct page *first_page = virt_to_page(stack);
 +      void *stack = task_stack_page(tsk);
 +      struct vm_struct *vm = task_stack_vm_area(tsk);
 +
 +      BUILD_BUG_ON(IS_ENABLED(CONFIG_VMAP_STACK) && PAGE_SIZE % 1024 != 0);
  
 -      mod_zone_page_state(page_zone(first_page), NR_KERNEL_STACK_KB,
 -                          THREAD_SIZE / 1024 * account);
 +      if (vm) {
 +              int i;
  
 -      memcg_kmem_update_page_stat(
 -              first_page, MEMCG_KERNEL_STACK_KB,
 -              account * (THREAD_SIZE / 1024));
 +              BUG_ON(vm->nr_pages != THREAD_SIZE / PAGE_SIZE);
 +
 +              for (i = 0; i < THREAD_SIZE / PAGE_SIZE; i++) {
 +                      mod_zone_page_state(page_zone(vm->pages[i]),
 +                                          NR_KERNEL_STACK_KB,
 +                                          PAGE_SIZE / 1024 * account);
 +              }
 +
 +              /* All stack pages belong to the same memcg. */
 +              memcg_kmem_update_page_stat(vm->pages[0], MEMCG_KERNEL_STACK_KB,
 +                                          account * (THREAD_SIZE / 1024));
 +      } else {
 +              /*
 +               * All stack pages are in the same zone and belong to the
 +               * same memcg.
 +               */
 +              struct page *first_page = virt_to_page(stack);
 +
 +              mod_zone_page_state(page_zone(first_page), NR_KERNEL_STACK_KB,
 +                                  THREAD_SIZE / 1024 * account);
 +
 +              memcg_kmem_update_page_stat(first_page, MEMCG_KERNEL_STACK_KB,
 +                                          account * (THREAD_SIZE / 1024));
 +      }
  }
  
  void free_task(struct task_struct *tsk)
  {
 -      account_kernel_stack(tsk->stack, -1);
 +      account_kernel_stack(tsk, -1);
        arch_release_thread_stack(tsk->stack);
 -      free_thread_stack(tsk->stack);
 +      free_thread_stack(tsk);
        rt_mutex_debug_task_free(tsk);
        ftrace_graph_exit_task(tsk);
        put_seccomp_filter(tsk);
@@@ -286,6 -243,12 +286,12 @@@ static inline void free_signal_struct(s
  {
        taskstats_tgid_free(sig);
        sched_autogroup_exit(sig);
+       /*
+        * __mmdrop is not safe to call from softirq context on x86 due to
+        * pgd_dtor so postpone it to the async context
+        */
+       if (sig->oom_mm)
+               mmdrop_async(sig->oom_mm);
        kmem_cache_free(signal_cachep, sig);
  }
  
@@@ -385,7 -348,6 +391,7 @@@ static struct task_struct *dup_task_str
  {
        struct task_struct *tsk;
        unsigned long *stack;
 +      struct vm_struct *stack_vm_area;
        int err;
  
        if (node == NUMA_NO_NODE)
        if (!stack)
                goto free_tsk;
  
 +      stack_vm_area = task_stack_vm_area(tsk);
 +
        err = arch_dup_task_struct(tsk, orig);
 +
 +      /*
 +       * arch_dup_task_struct() clobbers the stack-related fields.  Make
 +       * sure they're properly initialized before using any stack-related
 +       * functions again.
 +       */
 +      tsk->stack = stack;
 +#ifdef CONFIG_VMAP_STACK
 +      tsk->stack_vm_area = stack_vm_area;
 +#endif
 +
        if (err)
                goto free_stack;
  
 -      tsk->stack = stack;
  #ifdef CONFIG_SECCOMP
        /*
         * We must handle setting up seccomp filters once we're under
        tsk->task_frag.page = NULL;
        tsk->wake_q.next = NULL;
  
 -      account_kernel_stack(stack, 1);
 +      account_kernel_stack(tsk, 1);
  
        kcov_task_init(tsk);
  
        return tsk;
  
  free_stack:
 -      free_thread_stack(stack);
 +      free_thread_stack(tsk);
  free_tsk:
        free_task_struct(tsk);
        return NULL;
  }
  
  #ifdef CONFIG_MMU
 -static int dup_mmap(struct mm_struct *mm, struct mm_struct *oldmm)
 +static __latent_entropy int dup_mmap(struct mm_struct *mm,
 +                                      struct mm_struct *oldmm)
  {
        struct vm_area_struct *mpnt, *tmp, *prev, **pprev;
        struct rb_node **rb_link, *rb_parent;
@@@ -768,6 -717,7 +774,7 @@@ static inline void __mmput(struct mm_st
        ksm_exit(mm);
        khugepaged_exit(mm); /* must run before exit_mmap */
        exit_mmap(mm);
+       mm_put_huge_zero_page(mm);
        set_mm_exe_file(mm, NULL);
        if (!list_empty(&mm->mmlist)) {
                spin_lock(&mmlist_lock);
        }
        if (mm->binfmt)
                module_put(mm->binfmt->module);
+       set_bit(MMF_OOM_SKIP, &mm->flags);
        mmdrop(mm);
  }
  
@@@ -1353,8 -1304,7 +1361,8 @@@ init_task_pid(struct task_struct *task
   * parts of the process environment (as per the clone
   * flags). The actual kick-off is left to the caller.
   */
 -static struct task_struct *copy_process(unsigned long clone_flags,
 +static __latent_entropy struct task_struct *copy_process(
 +                                      unsigned long clone_flags,
                                        unsigned long stack_start,
                                        unsigned long stack_size,
                                        int __user *child_tidptr,
@@@ -1838,7 -1788,6 +1846,7 @@@ long _do_fork(unsigned long clone_flags
  
        p = copy_process(clone_flags, stack_start, stack_size,
                         child_tidptr, NULL, trace, tls, NUMA_NO_NODE);
 +      add_latent_entropy();
        /*
         * Do this prior waking up the new thread - the thread pointer
         * might get invalid after that point, if the thread exits quickly.
diff --combined kernel/relay.c
index fc9b4a4af463a61732e185cceda2afffc827fd6e,32ce6ccc3218ab315b7b9e8e023236abcce746c1..037be708c24cc0211b2839362c18b0cb04882e40
@@@ -214,7 -214,7 +214,7 @@@ static void relay_destroy_buf(struct rc
                        __free_page(buf->page_array[i]);
                relay_free_page_array(buf->page_array);
        }
 -      chan->buf[buf->cpu] = NULL;
 +      *per_cpu_ptr(chan->buf, buf->cpu) = NULL;
        kfree(buf->padding);
        kfree(buf);
        kref_put(&chan->kref, relay_destroy_channel);
@@@ -328,13 -328,15 +328,15 @@@ static struct rchan_callbacks default_c
  
  /**
   *    wakeup_readers - wake up readers waiting on a channel
-  *    @data: contains the channel buffer
+  *    @work: contains the channel buffer
   *
-  *    This is the timer function used to defer reader waking.
+  *    This is the function used to defer reader waking
   */
- static void wakeup_readers(unsigned long data)
+ static void wakeup_readers(struct irq_work *work)
  {
-       struct rchan_buf *buf = (struct rchan_buf *)data;
+       struct rchan_buf *buf;
+       buf = container_of(work, struct rchan_buf, wakeup_work);
        wake_up_interruptible(&buf->read_wait);
  }
  
@@@ -352,9 -354,10 +354,10 @@@ static void __relay_reset(struct rchan_
        if (init) {
                init_waitqueue_head(&buf->read_wait);
                kref_init(&buf->kref);
-               setup_timer(&buf->timer, wakeup_readers, (unsigned long)buf);
-       } else
-               del_timer_sync(&buf->timer);
+               init_irq_work(&buf->wakeup_work, wakeup_readers);
+       } else {
+               irq_work_sync(&buf->wakeup_work);
+       }
  
        buf->subbufs_produced = 0;
        buf->subbufs_consumed = 0;
   */
  void relay_reset(struct rchan *chan)
  {
 +      struct rchan_buf *buf;
        unsigned int i;
  
        if (!chan)
                return;
  
 -      if (chan->is_global && chan->buf[0]) {
 -              __relay_reset(chan->buf[0], 0);
 +      if (chan->is_global && (buf = *per_cpu_ptr(chan->buf, 0))) {
 +              __relay_reset(buf, 0);
                return;
        }
  
        mutex_lock(&relay_channels_mutex);
        for_each_possible_cpu(i)
 -              if (chan->buf[i])
 -                      __relay_reset(chan->buf[i], 0);
 +              if ((buf = *per_cpu_ptr(chan->buf, i)))
 +                      __relay_reset(buf, 0);
        mutex_unlock(&relay_channels_mutex);
  }
  EXPORT_SYMBOL_GPL(relay_reset);
@@@ -441,7 -443,7 +444,7 @@@ static struct rchan_buf *relay_open_buf
        struct dentry *dentry;
  
        if (chan->is_global)
 -              return chan->buf[0];
 +              return *per_cpu_ptr(chan->buf, 0);
  
        buf = relay_create_buf(chan);
        if (!buf)
        __relay_reset(buf, 1);
  
        if(chan->is_global) {
 -              chan->buf[0] = buf;
 +              *per_cpu_ptr(chan->buf, 0) = buf;
                buf->cpu = 0;
        }
  
@@@ -487,7 -489,7 +490,7 @@@ free_buf
  static void relay_close_buf(struct rchan_buf *buf)
  {
        buf->finalized = 1;
-       del_timer_sync(&buf->timer);
+       irq_work_sync(&buf->wakeup_work);
        buf->chan->cb->remove_buf_file(buf->dentry);
        kref_put(&buf->kref, relay_remove_buf);
  }
@@@ -513,25 -515,46 +516,25 @@@ static void setup_callbacks(struct rcha
        chan->cb = cb;
  }
  
 -/**
 - *    relay_hotcpu_callback - CPU hotplug callback
 - *    @nb: notifier block
 - *    @action: hotplug action to take
 - *    @hcpu: CPU number
 - *
 - *    Returns the success/failure of the operation. (%NOTIFY_OK, %NOTIFY_BAD)
 - */
 -static int relay_hotcpu_callback(struct notifier_block *nb,
 -                              unsigned long action,
 -                              void *hcpu)
 +int relay_prepare_cpu(unsigned int cpu)
  {
 -      unsigned int hotcpu = (unsigned long)hcpu;
        struct rchan *chan;
 +      struct rchan_buf *buf;
  
 -      switch(action) {
 -      case CPU_UP_PREPARE:
 -      case CPU_UP_PREPARE_FROZEN:
 -              mutex_lock(&relay_channels_mutex);
 -              list_for_each_entry(chan, &relay_channels, list) {
 -                      if (chan->buf[hotcpu])
 -                              continue;
 -                      chan->buf[hotcpu] = relay_open_buf(chan, hotcpu);
 -                      if(!chan->buf[hotcpu]) {
 -                              printk(KERN_ERR
 -                                      "relay_hotcpu_callback: cpu %d buffer "
 -                                      "creation failed\n", hotcpu);
 -                              mutex_unlock(&relay_channels_mutex);
 -                              return notifier_from_errno(-ENOMEM);
 -                      }
 +      mutex_lock(&relay_channels_mutex);
 +      list_for_each_entry(chan, &relay_channels, list) {
 +              if ((buf = *per_cpu_ptr(chan->buf, cpu)))
 +                      continue;
 +              buf = relay_open_buf(chan, cpu);
 +              if (!buf) {
 +                      pr_err("relay: cpu %d buffer creation failed\n", cpu);
 +                      mutex_unlock(&relay_channels_mutex);
 +                      return -ENOMEM;
                }
 -              mutex_unlock(&relay_channels_mutex);
 -              break;
 -      case CPU_DEAD:
 -      case CPU_DEAD_FROZEN:
 -              /* No need to flush the cpu : will be flushed upon
 -               * final relay_flush() call. */
 -              break;
 +              *per_cpu_ptr(chan->buf, cpu) = buf;
        }
 -      return NOTIFY_OK;
 +      mutex_unlock(&relay_channels_mutex);
 +      return 0;
  }
  
  /**
@@@ -563,7 -586,6 +566,7 @@@ struct rchan *relay_open(const char *ba
  {
        unsigned int i;
        struct rchan *chan;
 +      struct rchan_buf *buf;
  
        if (!(subbuf_size && n_subbufs))
                return NULL;
        if (!chan)
                return NULL;
  
 +      chan->buf = alloc_percpu(struct rchan_buf *);
        chan->version = RELAYFS_CHANNEL_VERSION;
        chan->n_subbufs = n_subbufs;
        chan->subbuf_size = subbuf_size;
  
        mutex_lock(&relay_channels_mutex);
        for_each_online_cpu(i) {
 -              chan->buf[i] = relay_open_buf(chan, i);
 -              if (!chan->buf[i])
 +              buf = relay_open_buf(chan, i);
 +              if (!buf)
                        goto free_bufs;
 +              *per_cpu_ptr(chan->buf, i) = buf;
        }
        list_add(&chan->list, &relay_channels);
        mutex_unlock(&relay_channels_mutex);
  
  free_bufs:
        for_each_possible_cpu(i) {
 -              if (chan->buf[i])
 -                      relay_close_buf(chan->buf[i]);
 +              if ((buf = *per_cpu_ptr(chan->buf, i)))
 +                      relay_close_buf(buf);
        }
  
        kref_put(&chan->kref, relay_destroy_channel);
@@@ -649,7 -669,6 +652,7 @@@ int relay_late_setup_files(struct rcha
        unsigned int i, curr_cpu;
        unsigned long flags;
        struct dentry *dentry;
 +      struct rchan_buf *buf;
        struct rchan_percpu_buf_dispatcher disp;
  
        if (!chan || !base_filename)
  
        if (chan->is_global) {
                err = -EINVAL;
 -              if (!WARN_ON_ONCE(!chan->buf[0])) {
 -                      dentry = relay_create_buf_file(chan, chan->buf[0], 0);
 +              buf = *per_cpu_ptr(chan->buf, 0);
 +              if (!WARN_ON_ONCE(!buf)) {
 +                      dentry = relay_create_buf_file(chan, buf, 0);
                        if (dentry && !WARN_ON_ONCE(!chan->is_global)) {
 -                              relay_set_buf_dentry(chan->buf[0], dentry);
 +                              relay_set_buf_dentry(buf, dentry);
                                err = 0;
                        }
                }
         * on all currently online CPUs.
         */
        for_each_online_cpu(i) {
 -              if (unlikely(!chan->buf[i])) {
 +              buf = *per_cpu_ptr(chan->buf, i);
 +              if (unlikely(!buf)) {
                        WARN_ONCE(1, KERN_ERR "CPU has no buffer!\n");
                        err = -EINVAL;
                        break;
                }
  
 -              dentry = relay_create_buf_file(chan, chan->buf[i], i);
 +              dentry = relay_create_buf_file(chan, buf, i);
                if (unlikely(!dentry)) {
                        err = -EINVAL;
                        break;
  
                if (curr_cpu == i) {
                        local_irq_save(flags);
 -                      relay_set_buf_dentry(chan->buf[i], dentry);
 +                      relay_set_buf_dentry(buf, dentry);
                        local_irq_restore(flags);
                } else {
 -                      disp.buf = chan->buf[i];
 +                      disp.buf = buf;
                        disp.dentry = dentry;
                        smp_mb();
                        /* relay_channels_mutex must be held, so wait. */
@@@ -754,14 -771,15 +757,15 @@@ size_t relay_switch_subbuf(struct rchan
                        buf->early_bytes += buf->chan->subbuf_size -
                                            buf->padding[old_subbuf];
                smp_mb();
-               if (waitqueue_active(&buf->read_wait))
+               if (waitqueue_active(&buf->read_wait)) {
                        /*
                         * Calling wake_up_interruptible() from here
                         * will deadlock if we happen to be logging
                         * from the scheduler (trying to re-grab
                         * rq->lock), so defer it.
                         */
-                       mod_timer(&buf->timer, jiffies + 1);
+                       irq_work_queue(&buf->wakeup_work);
+               }
        }
  
        old = buf->data;
@@@ -808,10 -826,11 +812,10 @@@ void relay_subbufs_consumed(struct rcha
        if (!chan)
                return;
  
 -      if (cpu >= NR_CPUS || !chan->buf[cpu] ||
 -                                      subbufs_consumed > chan->n_subbufs)
 +      buf = *per_cpu_ptr(chan->buf, cpu);
 +      if (cpu >= NR_CPUS || !buf || subbufs_consumed > chan->n_subbufs)
                return;
  
 -      buf = chan->buf[cpu];
        if (subbufs_consumed > buf->subbufs_produced - buf->subbufs_consumed)
                buf->subbufs_consumed = buf->subbufs_produced;
        else
@@@ -827,19 -846,18 +831,19 @@@ EXPORT_SYMBOL_GPL(relay_subbufs_consume
   */
  void relay_close(struct rchan *chan)
  {
 +      struct rchan_buf *buf;
        unsigned int i;
  
        if (!chan)
                return;
  
        mutex_lock(&relay_channels_mutex);
 -      if (chan->is_global && chan->buf[0])
 -              relay_close_buf(chan->buf[0]);
 +      if (chan->is_global && (buf = *per_cpu_ptr(chan->buf, 0)))
 +              relay_close_buf(buf);
        else
                for_each_possible_cpu(i)
 -                      if (chan->buf[i])
 -                              relay_close_buf(chan->buf[i]);
 +                      if ((buf = *per_cpu_ptr(chan->buf, i)))
 +                              relay_close_buf(buf);
  
        if (chan->last_toobig)
                printk(KERN_WARNING "relay: one or more items not logged "
@@@ -860,21 -878,20 +864,21 @@@ EXPORT_SYMBOL_GPL(relay_close)
   */
  void relay_flush(struct rchan *chan)
  {
 +      struct rchan_buf *buf;
        unsigned int i;
  
        if (!chan)
                return;
  
 -      if (chan->is_global && chan->buf[0]) {
 -              relay_switch_subbuf(chan->buf[0], 0);
 +      if (chan->is_global && (buf = *per_cpu_ptr(chan->buf, 0))) {
 +              relay_switch_subbuf(buf, 0);
                return;
        }
  
        mutex_lock(&relay_channels_mutex);
        for_each_possible_cpu(i)
 -              if (chan->buf[i])
 -                      relay_switch_subbuf(chan->buf[i], 0);
 +              if ((buf = *per_cpu_ptr(chan->buf, i)))
 +                      relay_switch_subbuf(buf, 0);
        mutex_unlock(&relay_channels_mutex);
  }
  EXPORT_SYMBOL_GPL(relay_flush);
@@@ -1364,3 -1381,12 +1368,3 @@@ const struct file_operations relay_file
        .splice_read    = relay_file_splice_read,
  };
  EXPORT_SYMBOL_GPL(relay_file_operations);
 -
 -static __init int relay_init(void)
 -{
 -
 -      hotcpu_notifier(relay_hotcpu_callback, 0);
 -      return 0;
 -}
 -
 -early_initcall(relay_init);
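
The timer-based deferral that relay used for reader wakeups is replaced by irq_work above: relay_switch_subbuf() may be reached while the caller holds rq->lock (logging from the scheduler), so wake_up_interruptible() has to be deferred, and an irq_work runs its callback from interrupt context almost immediately instead of up to a full jiffy later. The same pattern in isolation, as a minimal sketch:

	#include <linux/irq_work.h>

	static struct irq_work deferred_wakeup;

	static void deferred_wakeup_fn(struct irq_work *work)
	{
		/* now in a context where wake_up_interruptible() is safe */
	}

	static void hot_path(void)
	{
		/* too constrained to wake anyone directly; defer instead */
		irq_work_queue(&deferred_wakeup);
	}

	static void __init hot_path_setup(void)
	{
		init_irq_work(&deferred_wakeup, deferred_wakeup_fn);
	}
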
diff --combined mm/huge_memory.c
index a6abd76baa725d56eb1e9adeb8ee1b6fca86cb80,fc0d37e357c328411992ab229ae63c94a9e368ed..d76700d280879f68a3457effb4978de19a51d39b
@@@ -59,7 -59,7 +59,7 @@@ static struct shrinker deferred_split_s
  static atomic_t huge_zero_refcount;
  struct page *huge_zero_page __read_mostly;
  
- struct page *get_huge_zero_page(void)
+ static struct page *get_huge_zero_page(void)
  {
        struct page *zero_page;
  retry:
@@@ -86,7 -86,7 +86,7 @@@
        return READ_ONCE(huge_zero_page);
  }
  
- void put_huge_zero_page(void)
+ static void put_huge_zero_page(void)
  {
        /*
         * Counter should never go to zero here. Only shrinker can put
        BUG_ON(atomic_dec_and_test(&huge_zero_refcount));
  }
  
+ struct page *mm_get_huge_zero_page(struct mm_struct *mm)
+ {
+       if (test_bit(MMF_HUGE_ZERO_PAGE, &mm->flags))
+               return READ_ONCE(huge_zero_page);
+       if (!get_huge_zero_page())
+               return NULL;
+       if (test_and_set_bit(MMF_HUGE_ZERO_PAGE, &mm->flags))
+               put_huge_zero_page();
+       return READ_ONCE(huge_zero_page);
+ }
+ void mm_put_huge_zero_page(struct mm_struct *mm)
+ {
+       if (test_bit(MMF_HUGE_ZERO_PAGE, &mm->flags))
+               put_huge_zero_page();
+ }
  static unsigned long shrink_huge_zero_page_count(struct shrinker *shrink,
                                        struct shrink_control *sc)
  {
@@@ -469,6 -489,49 +489,49 @@@ void prep_transhuge_page(struct page *p
        set_compound_page_dtor(page, TRANSHUGE_PAGE_DTOR);
  }
  
+ unsigned long __thp_get_unmapped_area(struct file *filp, unsigned long len,
+               loff_t off, unsigned long flags, unsigned long size)
+ {
+       unsigned long addr;
+       loff_t off_end = off + len;
+       loff_t off_align = round_up(off, size);
+       unsigned long len_pad;
+       if (off_end <= off_align || (off_end - off_align) < size)
+               return 0;
+       len_pad = len + size;
+       if (len_pad < len || (off + len_pad) < off)
+               return 0;
+       addr = current->mm->get_unmapped_area(filp, 0, len_pad,
+                                             off >> PAGE_SHIFT, flags);
+       if (IS_ERR_VALUE(addr))
+               return 0;
+       addr += (off - addr) & (size - 1);
+       return addr;
+ }
+ unsigned long thp_get_unmapped_area(struct file *filp, unsigned long addr,
+               unsigned long len, unsigned long pgoff, unsigned long flags)
+ {
+       loff_t off = (loff_t)pgoff << PAGE_SHIFT;
+       if (addr)
+               goto out;
+       if (!IS_DAX(filp->f_mapping->host) || !IS_ENABLED(CONFIG_FS_DAX_PMD))
+               goto out;
+       addr = __thp_get_unmapped_area(filp, len, off, flags, PMD_SIZE);
+       if (addr)
+               return addr;
+  out:
+       return current->mm->get_unmapped_area(filp, addr, len, pgoff, flags);
+ }
+ EXPORT_SYMBOL_GPL(thp_get_unmapped_area);
  static int __do_huge_pmd_anonymous_page(struct fault_env *fe, struct page *page,
                gfp_t gfp)
  {
@@@ -601,7 -664,7 +664,7 @@@ int do_huge_pmd_anonymous_page(struct f
                pgtable = pte_alloc_one(vma->vm_mm, haddr);
                if (unlikely(!pgtable))
                        return VM_FAULT_OOM;
-               zero_page = get_huge_zero_page();
+               zero_page = mm_get_huge_zero_page(vma->vm_mm);
                if (unlikely(!zero_page)) {
                        pte_free(vma->vm_mm, pgtable);
                        count_vm_event(THP_FAULT_FALLBACK);
                        }
                } else
                        spin_unlock(fe->ptl);
-               if (!set) {
+               if (!set)
                        pte_free(vma->vm_mm, pgtable);
-                       put_huge_zero_page();
-               }
                return ret;
        }
        gfp = alloc_hugepage_direct_gfpmask(vma);
@@@ -780,7 -841,7 +841,7 @@@ int copy_huge_pmd(struct mm_struct *dst
                 * since we already have a zero page to copy. It just takes a
                 * reference.
                 */
-               zero_page = get_huge_zero_page();
+               zero_page = mm_get_huge_zero_page(dst_mm);
                set_huge_zero_page(pgtable, dst_mm, vma, addr, dst_pmd,
                                zero_page);
                ret = 0;
@@@ -1038,7 -1099,6 +1099,6 @@@ alloc
                update_mmu_cache_pmd(vma, fe->address, fe->pmd);
                if (!page) {
                        add_mm_counter(vma->vm_mm, MM_ANONPAGES, HPAGE_PMD_NR);
-                       put_huge_zero_page();
                } else {
                        VM_BUG_ON_PAGE(!PageHead(page), page);
                        page_remove_rmap(page, true);
@@@ -1078,7 -1138,7 +1138,7 @@@ struct page *follow_trans_huge_pmd(stru
                goto out;
  
        page = pmd_page(*pmd);
 -      VM_BUG_ON_PAGE(!PageHead(page), page);
 +      VM_BUG_ON_PAGE(!PageHead(page) && !is_zone_device_page(page), page);
        if (flags & FOLL_TOUCH)
                touch_pmd(vma, addr, pmd);
        if ((flags & FOLL_MLOCK) && (vma->vm_flags & VM_LOCKED)) {
        }
  skip_mlock:
        page += (addr & ~HPAGE_PMD_MASK) >> PAGE_SHIFT;
 -      VM_BUG_ON_PAGE(!PageCompound(page), page);
 +      VM_BUG_ON_PAGE(!PageCompound(page) && !is_zone_device_page(page), page);
        if (flags & FOLL_GET)
                get_page(page);
  
@@@ -1502,7 -1562,6 +1562,6 @@@ static void __split_huge_zero_page_pmd(
        }
        smp_wmb(); /* make pte visible before pmd */
        pmd_populate(mm, pmd, pgtable);
-       put_huge_zero_page();
  }
  
  static void __split_huge_pmd_locked(struct vm_area_struct *vma, pmd_t *pmd,
  
        if (!vma_is_anonymous(vma)) {
                _pmd = pmdp_huge_clear_flush_notify(vma, haddr, pmd);
-               if (is_huge_zero_pmd(_pmd))
-                       put_huge_zero_page();
                if (vma_is_dax(vma))
                        return;
                page = pmd_page(_pmd);
diff --combined mm/page-writeback.c
index 28d6f36a2d79c26c30e25d52d01a8dc95dbaa4ac,67b7c8b55889606075aa9ab84594bdfee9f62cd1..439cc63ad903fb898192188367670efad4a2d1fa
@@@ -1965,36 -1965,6 +1965,6 @@@ bool wb_over_bg_thresh(struct bdi_write
        return false;
  }
  
- void throttle_vm_writeout(gfp_t gfp_mask)
- {
-       unsigned long background_thresh;
-       unsigned long dirty_thresh;
-         for ( ; ; ) {
-               global_dirty_limits(&background_thresh, &dirty_thresh);
-               dirty_thresh = hard_dirty_limit(&global_wb_domain, dirty_thresh);
-                 /*
-                  * Boost the allowable dirty threshold a bit for page
-                  * allocators so they don't get DoS'ed by heavy writers
-                  */
-                 dirty_thresh += dirty_thresh / 10;      /* wheeee... */
-                 if (global_node_page_state(NR_UNSTABLE_NFS) +
-                       global_node_page_state(NR_WRITEBACK) <= dirty_thresh)
-                               break;
-                 congestion_wait(BLK_RW_ASYNC, HZ/10);
-               /*
-                * The caller might hold locks which can prevent IO completion
-                * or progress in the filesystem.  So we cannot just sit here
-                * waiting for IO to complete.
-                */
-               if ((gfp_mask & (__GFP_FS|__GFP_IO)) != (__GFP_FS|__GFP_IO))
-                       break;
-         }
- }
  /*
   * sysctl handler for /proc/sys/vm/dirty_writeback_centisecs
   */
@@@ -2080,12 -2050,26 +2050,12 @@@ void writeback_set_ratelimit(void
                ratelimit_pages = 16;
  }
  
 -static int
 -ratelimit_handler(struct notifier_block *self, unsigned long action,
 -                void *hcpu)
 +static int page_writeback_cpu_online(unsigned int cpu)
  {
 -
 -      switch (action & ~CPU_TASKS_FROZEN) {
 -      case CPU_ONLINE:
 -      case CPU_DEAD:
 -              writeback_set_ratelimit();
 -              return NOTIFY_OK;
 -      default:
 -              return NOTIFY_DONE;
 -      }
 +      writeback_set_ratelimit();
 +      return 0;
  }
  
 -static struct notifier_block ratelimit_nb = {
 -      .notifier_call  = ratelimit_handler,
 -      .next           = NULL,
 -};
 -
  /*
   * Called early on to tune the page writeback dirty limits.
   *
@@@ -2108,10 -2092,8 +2078,10 @@@ void __init page_writeback_init(void
  {
        BUG_ON(wb_domain_init(&global_wb_domain, GFP_KERNEL));
  
 -      writeback_set_ratelimit();
 -      register_cpu_notifier(&ratelimit_nb);
 +      cpuhp_setup_state(CPUHP_AP_ONLINE_DYN, "mm/writeback:online",
 +                        page_writeback_cpu_online, NULL);
 +      cpuhp_setup_state(CPUHP_MM_WRITEBACK_DEAD, "mm/writeback:dead", NULL,
 +                        page_writeback_cpu_online);
  }
  
  /**
@@@ -2746,7 -2728,7 +2716,7 @@@ int test_clear_page_writeback(struct pa
        int ret;
  
        lock_page_memcg(page);
-       if (mapping) {
+       if (mapping && mapping_use_writeback_tags(mapping)) {
                struct inode *inode = mapping->host;
                struct backing_dev_info *bdi = inode_to_bdi(inode);
                unsigned long flags;
@@@ -2789,7 -2771,7 +2759,7 @@@ int __test_set_page_writeback(struct pa
        int ret;
  
        lock_page_memcg(page);
-       if (mapping) {
+       if (mapping && mapping_use_writeback_tags(mapping)) {
                struct inode *inode = mapping->host;
                struct backing_dev_info *bdi = inode_to_bdi(inode);
                unsigned long flags;
diff --combined mm/page_alloc.c
index 41940f6e3c1c07eec3467f41de4e5337e5d4b654,ce03c930ffcf169923c111169061280524aead86..721d62c5be69977bc595f9cd8d8d9e5ce618ea26
@@@ -64,7 -64,6 +64,7 @@@
  #include <linux/page_owner.h>
  #include <linux/kthread.h>
  #include <linux/memcontrol.h>
 +#include <linux/random.h>
  
  #include <asm/sections.h>
  #include <asm/tlbflush.h>
@@@ -92,11 -91,6 +92,11 @@@ EXPORT_PER_CPU_SYMBOL(_numa_mem_)
  int _node_numa_mem_[MAX_NUMNODES];
  #endif
  
 +#ifdef CONFIG_GCC_PLUGIN_LATENT_ENTROPY
 +volatile u64 latent_entropy __latent_entropy;
 +EXPORT_SYMBOL(latent_entropy);
 +#endif
 +
  /*
   * Array of node states.
   */
@@@ -260,7 -254,7 +260,7 @@@ int watermark_scale_factor = 10
  
  static unsigned long __meminitdata nr_kernel_pages;
  static unsigned long __meminitdata nr_all_pages;
- static unsigned long __meminitdata dma_reserve;
+ static unsigned long __meminitdata nr_memory_reserve;
  
  #ifdef CONFIG_HAVE_MEMBLOCK_NODE_MAP
  static unsigned long __meminitdata arch_zone_lowest_possible_pfn[MAX_NR_ZONES];
@@@ -613,6 -607,9 +613,9 @@@ static bool need_debug_guardpage(void
        if (!debug_pagealloc_enabled())
                return false;
  
+       if (!debug_guardpage_minorder())
+               return false;
        return true;
  }
  
@@@ -621,6 -618,9 +624,9 @@@ static void init_debug_guardpage(void
        if (!debug_pagealloc_enabled())
                return;
  
+       if (!debug_guardpage_minorder())
+               return;
        _debug_guardpage_enabled = true;
  }
  
@@@ -641,19 -641,22 +647,22 @@@ static int __init debug_guardpage_minor
        pr_info("Setting debug_guardpage_minorder to %lu\n", res);
        return 0;
  }
__setup("debug_guardpage_minorder=", debug_guardpage_minorder_setup);
early_param("debug_guardpage_minorder", debug_guardpage_minorder_setup);
  
- static inline void set_page_guard(struct zone *zone, struct page *page,
+ static inline bool set_page_guard(struct zone *zone, struct page *page,
                                unsigned int order, int migratetype)
  {
        struct page_ext *page_ext;
  
        if (!debug_guardpage_enabled())
-               return;
+               return false;
+       if (order >= debug_guardpage_minorder())
+               return false;
  
        page_ext = lookup_page_ext(page);
        if (unlikely(!page_ext))
-               return;
+               return false;
  
        __set_bit(PAGE_EXT_DEBUG_GUARD, &page_ext->flags);
  
        set_page_private(page, order);
        /* Guard pages are not available for any usage */
        __mod_zone_freepage_state(zone, -(1 << order), migratetype);
+       return true;
  }
  
  static inline void clear_page_guard(struct zone *zone, struct page *page,
                __mod_zone_freepage_state(zone, (1 << order), migratetype);
  }
  #else
- struct page_ext_operations debug_guardpage_ops = { NULL, };
- static inline void set_page_guard(struct zone *zone, struct page *page,
-                               unsigned int order, int migratetype) {}
+ struct page_ext_operations debug_guardpage_ops;
+ static inline bool set_page_guard(struct zone *zone, struct page *page,
+                       unsigned int order, int migratetype) { return false; }
  static inline void clear_page_guard(struct zone *zone, struct page *page,
                                unsigned int order, int migratetype) {}
  #endif
@@@ -1240,15 -1245,6 +1251,15 @@@ static void __free_pages_ok(struct pag
        local_irq_restore(flags);
  }
  
 +bool __meminitdata ram_latent_entropy;
 +
 +static int __init setup_ram_latent_entropy(char *str)
 +{
 +      ram_latent_entropy = true;
 +      return 0;
 +}
 +early_param("ram_latent_entropy", setup_ram_latent_entropy);
 +
  static void __init __free_pages_boot_core(struct page *page, unsigned int order)
  {
        unsigned int nr_pages = 1 << order;
        __ClearPageReserved(p);
        set_page_count(p, 0);
  
 +      if (ram_latent_entropy && !PageHighMem(page) &&
 +              page_to_pfn(page) < 0x100000) {
 +              u64 hash = 0;
 +              size_t index, end = PAGE_SIZE * nr_pages / sizeof(hash);
 +              const u64 *data = lowmem_page_address(page);
 +
 +              for (index = 0; index < end; index++)
 +                      hash ^= hash + data[index];
 +              add_device_randomness((const void *)&hash, sizeof(hash));
 +      }
 +
        page_zone(page)->managed_pages += nr_pages;
        set_page_refcounted(page);
        __free_pages(page, order);
@@@ -1419,15 -1404,18 +1430,18 @@@ static void __init deferred_free_range(
                return;
  
        /* Free a large naturally-aligned chunk if possible */
-       if (nr_pages == MAX_ORDER_NR_PAGES &&
-           (pfn & (MAX_ORDER_NR_PAGES-1)) == 0) {
+       if (nr_pages == pageblock_nr_pages &&
+           (pfn & (pageblock_nr_pages - 1)) == 0) {
                set_pageblock_migratetype(page, MIGRATE_MOVABLE);
-               __free_pages_boot_core(page, MAX_ORDER-1);
+               __free_pages_boot_core(page, pageblock_order);
                return;
        }
  
-       for (i = 0; i < nr_pages; i++, page++)
+       for (i = 0; i < nr_pages; i++, page++, pfn++) {
+               if ((pfn & (pageblock_nr_pages - 1)) == 0)
+                       set_pageblock_migratetype(page, MIGRATE_MOVABLE);
                __free_pages_boot_core(page, 0);
+       }
  }
  
  /* Completion tracking for deferred_init_memmap() threads */
@@@ -1495,9 -1483,9 +1509,9 @@@ static int __init deferred_init_memmap(
  
                        /*
                         * Ensure pfn_valid is checked every
-                        * MAX_ORDER_NR_PAGES for memory holes
+                        * pageblock_nr_pages for memory holes
                         */
-                       if ((pfn & (MAX_ORDER_NR_PAGES - 1)) == 0) {
+                       if ((pfn & (pageblock_nr_pages - 1)) == 0) {
                                if (!pfn_valid(pfn)) {
                                        page = NULL;
                                        goto free_range;
                        }
  
                        /* Minimise pfn page lookups and scheduler checks */
-                       if (page && (pfn & (MAX_ORDER_NR_PAGES - 1)) != 0) {
+                       if (page && (pfn & (pageblock_nr_pages - 1)) != 0) {
                                page++;
                        } else {
                                nr_pages += nr_to_free;
@@@ -1546,6 -1534,9 +1560,9 @@@ free_range
                        free_base_page = NULL;
                        free_base_pfn = nr_to_free = 0;
                }
+               /* Free the last block of pages to allocator */
+               nr_pages += nr_to_free;
+               deferred_free_range(free_base_page, free_base_pfn, nr_to_free);
  
                first_init_pfn = max(end_pfn, first_init_pfn);
        }
@@@ -1642,18 -1633,15 +1659,15 @@@ static inline void expand(struct zone *
                size >>= 1;
                VM_BUG_ON_PAGE(bad_range(zone, &page[size]), &page[size]);
  
-               if (IS_ENABLED(CONFIG_DEBUG_PAGEALLOC) &&
-                       debug_guardpage_enabled() &&
-                       high < debug_guardpage_minorder()) {
-                       /*
-                        * Mark as guard pages (or page), that will allow to
-                        * merge back to allocator when buddy will be freed.
-                        * Corresponding page table entries will not be touched,
-                        * pages will stay not present in virtual address space
-                        */
-                       set_page_guard(zone, &page[size], high, migratetype);
+               /*
+                * Mark as guard pages (or page), that will allow to
+                * merge back to allocator when buddy will be freed.
+                * Corresponding page table entries will not be touched,
+                * pages will stay not present in virtual address space
+                */
+               if (set_page_guard(zone, &page[size], high, migratetype))
                        continue;
-               }
                list_add(&page[size].lru, &area->free_list[migratetype]);
                area->nr_free++;
                set_page_order(&page[size], high);
@@@ -2515,9 -2503,14 +2529,14 @@@ int __isolate_free_page(struct page *pa
        mt = get_pageblock_migratetype(page);
  
        if (!is_migrate_isolate(mt)) {
-               /* Obey watermarks as if the page was being allocated */
-               watermark = low_wmark_pages(zone) + (1 << order);
-               if (!zone_watermark_ok(zone, 0, watermark, 0, 0))
+               /*
+                * Obey watermarks as if the page was being allocated. We can
+                * emulate a high-order watermark check with a raised order-0
+                * watermark, because we already know our high-order page
+                * exists.
+                */
+               watermark = min_wmark_pages(zone) + (1UL << order);
+               if (!zone_watermark_ok(zone, 0, watermark, 0, ALLOC_CMA))
                        return 0;
  
                __mod_zone_freepage_state(zone, -(1UL << order), mt);
@@@ -3163,6 -3156,61 +3182,61 @@@ __alloc_pages_direct_compact(gfp_t gfp_
        return NULL;
  }
  
+ static inline bool
+ should_compact_retry(struct alloc_context *ac, int order, int alloc_flags,
+                    enum compact_result compact_result,
+                    enum compact_priority *compact_priority,
+                    int compaction_retries)
+ {
+       int max_retries = MAX_COMPACT_RETRIES;
+       int min_priority;
+       if (!order)
+               return false;
+       /*
+        * compaction considers all the zone as desperately out of memory
+        * so it doesn't really make much sense to retry except when the
+        * failure could be caused by insufficient priority
+        */
+       if (compaction_failed(compact_result))
+               goto check_priority;
+       /*
+        * make sure the compaction wasn't deferred or didn't bail out early
+        * due to locks contention before we declare that we should give up.
+        * But do not retry if the given zonelist is not suitable for
+        * compaction.
+        */
+       if (compaction_withdrawn(compact_result))
+               return compaction_zonelist_suitable(ac, order, alloc_flags);
+       /*
+        * !costly requests are much more important than __GFP_REPEAT
+        * costly ones because they are de facto nofail and invoke OOM
+        * killer to move on while costly can fail and users are ready
+        * to cope with that. 1/4 retries is rather arbitrary but we
+        * would need much more detailed feedback from compaction to
+        * make a better decision.
+        */
+       if (order > PAGE_ALLOC_COSTLY_ORDER)
+               max_retries /= 4;
+       if (compaction_retries <= max_retries)
+               return true;
+       /*
+        * Make sure there is at least one attempt at the highest priority
+        * if we exhausted all retries at the lower priorities
+        */
+ check_priority:
+       min_priority = (order > PAGE_ALLOC_COSTLY_ORDER) ?
+                       MIN_COMPACT_COSTLY_PRIORITY : MIN_COMPACT_PRIORITY;
+       if (*compact_priority > min_priority) {
+               (*compact_priority)--;
+               return true;
+       }
+       return false;
+ }
  #else
  static inline struct page *
  __alloc_pages_direct_compact(gfp_t gfp_mask, unsigned int order,
        return NULL;
  }
  
- #endif /* CONFIG_COMPACTION */
  static inline bool
  should_compact_retry(struct alloc_context *ac, unsigned int order, int alloc_flags,
                     enum compact_result compact_result,
        }
        return false;
  }
+ #endif /* CONFIG_COMPACTION */
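should_compact_retry() above allows at most MAX_COMPACT_RETRIES attempts (a quarter of that for costly orders) and, once those are spent, keeps retrying only while the compaction priority can still be raised towards a per-order floor. A rough standalone model of that escalation, with made-up constants standing in for the kernel's:

    #include <stdbool.h>
    #include <stdio.h>

    #define MAX_RETRIES        16   /* illustrative values, not the kernel's */
    #define COSTLY_ORDER        3
    #define MIN_PRIO_COSTLY     1   /* lower number means higher priority */
    #define MIN_PRIO            0

    static bool retry_sketch(unsigned int order, int retries, int *prio)
    {
            int max_retries = MAX_RETRIES;
            int min_prio;

            if (order > COSTLY_ORDER)
                    max_retries /= 4;
            if (retries <= max_retries)
                    return true;

            /* retries exhausted: try once more at each higher priority */
            min_prio = (order > COSTLY_ORDER) ? MIN_PRIO_COSTLY : MIN_PRIO;
            if (*prio > min_prio) {
                    (*prio)--;
                    return true;
            }
            return false;
    }

    int main(void)
    {
            int prio = 2;

            printf("retry=%d prio=%d\n", retry_sketch(4, 5, &prio), prio);
            return 0;
    }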
  
  /* Perform direct synchronous page reclaim */
  static int
@@@ -4581,7 -4628,7 +4654,7 @@@ static void build_zonelists_in_node_ord
        int j;
        struct zonelist *zonelist;
  
-       zonelist = &pgdat->node_zonelists[0];
+       zonelist = &pgdat->node_zonelists[ZONELIST_FALLBACK];
        for (j = 0; zonelist->_zonerefs[j].zone != NULL; j++)
                ;
        j = build_zonelists_node(NODE_DATA(node), zonelist, j);
@@@ -4597,7 -4644,7 +4670,7 @@@ static void build_thisnode_zonelists(pg
        int j;
        struct zonelist *zonelist;
  
-       zonelist = &pgdat->node_zonelists[1];
+       zonelist = &pgdat->node_zonelists[ZONELIST_NOFALLBACK];
        j = build_zonelists_node(pgdat, zonelist, 0);
        zonelist->_zonerefs[j].zone = NULL;
        zonelist->_zonerefs[j].zone_idx = 0;
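The bare indices 0 and 1 into node_zonelists[] are replaced by named constants throughout; these are presumably defined by an enum in the zone headers roughly like the sketch below (the actual definition is not part of this diff):

    /* illustrative sketch only; the real definition lives in include/linux/mmzone.h */
    enum {
            ZONELIST_FALLBACK,      /* zonelist with fallback to other nodes */
    #ifdef CONFIG_NUMA
            ZONELIST_NOFALLBACK,    /* zonelist restricted to the local node */
    #endif
            MAX_ZONELISTS
    };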
@@@ -4618,7 -4665,7 +4691,7 @@@ static void build_zonelists_in_zone_ord
        struct zone *z;
        struct zonelist *zonelist;
  
-       zonelist = &pgdat->node_zonelists[0];
+       zonelist = &pgdat->node_zonelists[ZONELIST_FALLBACK];
        pos = 0;
        for (zone_type = MAX_NR_ZONES - 1; zone_type >= 0; zone_type--) {
                for (j = 0; j < nr_nodes; j++) {
@@@ -4753,7 -4800,7 +4826,7 @@@ static void build_zonelists(pg_data_t *
  
        local_node = pgdat->node_id;
  
-       zonelist = &pgdat->node_zonelists[0];
+       zonelist = &pgdat->node_zonelists[ZONELIST_FALLBACK];
        j = build_zonelists_node(pgdat, zonelist, 0);
  
        /*
@@@ -5025,15 -5072,6 +5098,6 @@@ void __meminit memmap_init_zone(unsigne
                        break;
  
  #ifdef CONFIG_HAVE_MEMBLOCK_NODE_MAP
-               /*
-                * If not mirrored_kernelcore and ZONE_MOVABLE exists, range
-                * from zone_movable_pfn[nid] to end of each node should be
-                * ZONE_MOVABLE not ZONE_NORMAL. skip it.
-                */
-               if (!mirrored_kernelcore && zone_movable_pfn[nid])
-                       if (zone == ZONE_NORMAL && pfn >= zone_movable_pfn[nid])
-                               continue;
                /*
                 * Check given memblock attribute by firmware which can affect
                 * kernel memory layout.  If zone==ZONE_MOVABLE but memory is
@@@ -5477,6 -5515,12 +5541,12 @@@ static void __meminit adjust_zone_range
                        *zone_end_pfn = min(node_end_pfn,
                                arch_zone_highest_possible_pfn[movable_zone]);
  
+               /* Adjust for ZONE_MOVABLE starting within this range */
+               } else if (!mirrored_kernelcore &&
+                       *zone_start_pfn < zone_movable_pfn[nid] &&
+                       *zone_end_pfn > zone_movable_pfn[nid]) {
+                       *zone_end_pfn = zone_movable_pfn[nid];
                /* Check if this whole range is within ZONE_MOVABLE */
                } else if (*zone_start_pfn >= zone_movable_pfn[nid])
                        *zone_start_pfn = *zone_end_pfn;
@@@ -5580,28 -5624,23 +5650,23 @@@ static unsigned long __meminit zone_abs
         * Treat pages to be ZONE_MOVABLE in ZONE_NORMAL as absent pages
         * and vice versa.
         */
-       if (zone_movable_pfn[nid]) {
-               if (mirrored_kernelcore) {
-                       unsigned long start_pfn, end_pfn;
-                       struct memblock_region *r;
-                       for_each_memblock(memory, r) {
-                               start_pfn = clamp(memblock_region_memory_base_pfn(r),
-                                                 zone_start_pfn, zone_end_pfn);
-                               end_pfn = clamp(memblock_region_memory_end_pfn(r),
-                                               zone_start_pfn, zone_end_pfn);
-                               if (zone_type == ZONE_MOVABLE &&
-                                   memblock_is_mirror(r))
-                                       nr_absent += end_pfn - start_pfn;
-                               if (zone_type == ZONE_NORMAL &&
-                                   !memblock_is_mirror(r))
-                                       nr_absent += end_pfn - start_pfn;
-                       }
-               } else {
-                       if (zone_type == ZONE_NORMAL)
-                               nr_absent += node_end_pfn - zone_movable_pfn[nid];
+       if (mirrored_kernelcore && zone_movable_pfn[nid]) {
+               unsigned long start_pfn, end_pfn;
+               struct memblock_region *r;
+               for_each_memblock(memory, r) {
+                       start_pfn = clamp(memblock_region_memory_base_pfn(r),
+                                         zone_start_pfn, zone_end_pfn);
+                       end_pfn = clamp(memblock_region_memory_end_pfn(r),
+                                       zone_start_pfn, zone_end_pfn);
+                       if (zone_type == ZONE_MOVABLE &&
+                           memblock_is_mirror(r))
+                               nr_absent += end_pfn - start_pfn;
+                       if (zone_type == ZONE_NORMAL &&
+                           !memblock_is_mirror(r))
+                               nr_absent += end_pfn - start_pfn;
                }
        }
  
@@@ -5838,10 -5877,10 +5903,10 @@@ static void __paginginit free_area_init
                }
  
                /* Account for reserved pages */
-               if (j == 0 && freesize > dma_reserve) {
-                       freesize -= dma_reserve;
+               if (j == 0 && freesize > nr_memory_reserve) {
+                       freesize -= nr_memory_reserve;
                        printk(KERN_DEBUG "  %s zone: %lu pages reserved\n",
-                                       zone_names[0], dma_reserve);
+                                       zone_names[0], nr_memory_reserve);
                }
  
                if (!is_highmem_idx(j))
@@@ -6527,8 -6566,9 +6592,9 @@@ void __init mem_init_print_info(const c
  }
  
  /**
-  * set_dma_reserve - set the specified number of pages reserved in the first zone
-  * @new_dma_reserve: The number of pages to mark reserved
+  * set_memory_reserve - set number of pages reserved in the first zone
+  * @nr_reserve: The number of pages to mark reserved
+  * @inc: if true, add @nr_reserve to the existing value; if false, set a new value.
   *
   * The per-cpu batchsize and zone watermarks are determined by managed_pages.
   * In the DMA zone, a significant percentage may be consumed by kernel image
   * first zone (e.g., ZONE_DMA). The effect will be lower watermarks and
   * smaller per-cpu batchsize.
   */
- void __init set_dma_reserve(unsigned long new_dma_reserve)
+ void __init set_memory_reserve(unsigned long nr_reserve, bool inc)
  {
-       dma_reserve = new_dma_reserve;
+       if (inc)
+               nr_memory_reserve += nr_reserve;
+       else
+               nr_memory_reserve = nr_reserve;
  }
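With the added bool argument, callers can either overwrite the reservation, matching the old set_dma_reserve() behaviour, or accumulate further reservations on top of it. A hypothetical caller, purely for illustration (the page-count variables below are invented names):

    /* hypothetical arch setup code, not part of this patch */
    void __init example_arch_reserve_pages(void)
    {
            set_memory_reserve(example_dma_hole_pages, false);  /* replace current value */
            set_memory_reserve(example_backup_pages, true);     /* add on top of it */
    }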
  
  void __init free_area_init(unsigned long *zones_size)
@@@ -6955,6 -6998,17 +7024,17 @@@ static int __init set_hashdist(char *st
  __setup("hashdist=", set_hashdist);
  #endif
  
+ #ifndef __HAVE_ARCH_RESERVED_KERNEL_PAGES
+ /*
+  * Returns the number of pages that the architecture has reserved but
+  * that are not known to alloc_large_system_hash().
+  */
+ static unsigned long __init arch_reserved_kernel_pages(void)
+ {
+       return 0;
+ }
+ #endif
  /*
   * allocate a large system hash table from bootmem
   * - it is assumed that the hash table must contain an exact power-of-2
@@@ -6979,6 -7033,7 +7059,7 @@@ void *__init alloc_large_system_hash(co
        if (!numentries) {
                /* round applicable memory size up to nearest megabyte */
                numentries = nr_kernel_pages;
+               numentries -= arch_reserved_kernel_pages();
  
                /* It isn't necessary when PAGE_SIZE >= 1MB */
                if (PAGE_SHIFT < 20)
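The weak default above returns 0, so numentries is unchanged unless an architecture defines __HAVE_ARCH_RESERVED_KERNEL_PAGES and supplies its own count of pages that the hash sizing should ignore. A hypothetical override, for illustration only (the helper it calls is invented):

    /* hypothetical arch header */
    #define __HAVE_ARCH_RESERVED_KERNEL_PAGES

    /* hypothetical arch implementation */
    unsigned long __init arch_reserved_kernel_pages(void)
    {
            return example_firmware_reserved_bytes() >> PAGE_SHIFT;
    }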
diff --combined mm/slab.c
index 090fb26b3a39b4feba105650f2a663f5c9972064,042017e511da50d3d3186fc88c1367437d67a1e8..f2b8fb9a6ecca560f1cdff0338db31438b8d535c
+++ b/mm/slab.c
@@@ -233,6 -233,7 +233,7 @@@ static void kmem_cache_node_init(struc
        spin_lock_init(&parent->list_lock);
        parent->free_objects = 0;
        parent->free_touched = 0;
+       parent->num_slabs = 0;
  }
  
  #define MAKE_LIST(cachep, listp, slab, nodeid)                                \
@@@ -886,7 -887,6 +887,7 @@@ static int init_cache_node(struct kmem_
        return 0;
  }
  
 +#if (defined(CONFIG_NUMA) && defined(CONFIG_MEMORY_HOTPLUG)) || defined(CONFIG_SMP)
  /*
   * Allocates and initializes node for a node on each slab cache, used for
   * either memory or cpu hotplug.  If memory is being hot-added, the kmem_cache_node
@@@ -909,7 -909,6 +910,7 @@@ static int init_cache_node_node(int nod
  
        return 0;
  }
 +#endif
  
  static int setup_kmem_cache_node(struct kmem_cache *cachep,
                                int node, gfp_t gfp, bool force_change)
@@@ -977,8 -976,6 +978,8 @@@ fail
        return ret;
  }
  
 +#ifdef CONFIG_SMP
 +
  static void cpuup_canceled(long cpu)
  {
        struct kmem_cache *cachep;
        return -ENOMEM;
  }
  
 -static int cpuup_callback(struct notifier_block *nfb,
 -                                  unsigned long action, void *hcpu)
 +int slab_prepare_cpu(unsigned int cpu)
  {
 -      long cpu = (long)hcpu;
 -      int err = 0;
 +      int err;
  
 -      switch (action) {
 -      case CPU_UP_PREPARE:
 -      case CPU_UP_PREPARE_FROZEN:
 -              mutex_lock(&slab_mutex);
 -              err = cpuup_prepare(cpu);
 -              mutex_unlock(&slab_mutex);
 -              break;
 -      case CPU_ONLINE:
 -      case CPU_ONLINE_FROZEN:
 -              start_cpu_timer(cpu);
 -              break;
 -#ifdef CONFIG_HOTPLUG_CPU
 -      case CPU_DOWN_PREPARE:
 -      case CPU_DOWN_PREPARE_FROZEN:
 -              /*
 -               * Shutdown cache reaper. Note that the slab_mutex is
 -               * held so that if cache_reap() is invoked it cannot do
 -               * anything expensive but will only modify reap_work
 -               * and reschedule the timer.
 -              */
 -              cancel_delayed_work_sync(&per_cpu(slab_reap_work, cpu));
 -              /* Now the cache_reaper is guaranteed to be not running. */
 -              per_cpu(slab_reap_work, cpu).work.func = NULL;
 -              break;
 -      case CPU_DOWN_FAILED:
 -      case CPU_DOWN_FAILED_FROZEN:
 -              start_cpu_timer(cpu);
 -              break;
 -      case CPU_DEAD:
 -      case CPU_DEAD_FROZEN:
 -              /*
 -               * Even if all the cpus of a node are down, we don't free the
 -               * kmem_cache_node of any cache. This to avoid a race between
 -               * cpu_down, and a kmalloc allocation from another cpu for
 -               * memory from the node of the cpu going down.  The node
 -               * structure is usually allocated from kmem_cache_create() and
 -               * gets destroyed at kmem_cache_destroy().
 -               */
 -              /* fall through */
 +      mutex_lock(&slab_mutex);
 +      err = cpuup_prepare(cpu);
 +      mutex_unlock(&slab_mutex);
 +      return err;
 +}
 +
 +/*
 + * This is called for a failed online attempt and for a successful
 + * offline.
 + *
 + * Even if all the cpus of a node are down, we don't free the
 + * kmem_list3 of any cache. This to avoid a race between cpu_down, and
 + * a kmalloc allocation from another cpu for memory from the node of
 + * the cpu going down.  The list3 structure is usually allocated from
 + * kmem_cache_create() and gets destroyed at kmem_cache_destroy().
 + */
 +int slab_dead_cpu(unsigned int cpu)
 +{
 +      mutex_lock(&slab_mutex);
 +      cpuup_canceled(cpu);
 +      mutex_unlock(&slab_mutex);
 +      return 0;
 +}
  #endif
 -      case CPU_UP_CANCELED:
 -      case CPU_UP_CANCELED_FROZEN:
 -              mutex_lock(&slab_mutex);
 -              cpuup_canceled(cpu);
 -              mutex_unlock(&slab_mutex);
 -              break;
 -      }
 -      return notifier_from_errno(err);
 +
 +static int slab_online_cpu(unsigned int cpu)
 +{
 +      start_cpu_timer(cpu);
 +      return 0;
  }
  
 -static struct notifier_block cpucache_notifier = {
 -      &cpuup_callback, NULL, 0
 -};
 +static int slab_offline_cpu(unsigned int cpu)
 +{
 +      /*
 +       * Shutdown cache reaper. Note that the slab_mutex is held so
 +       * that if cache_reap() is invoked it cannot do anything
 +       * expensive but will only modify reap_work and reschedule the
 +       * timer.
 +       */
 +      cancel_delayed_work_sync(&per_cpu(slab_reap_work, cpu));
 +      /* Now the cache_reaper is guaranteed to be not running. */
 +      per_cpu(slab_reap_work, cpu).work.func = NULL;
 +      return 0;
 +}
  
  #if defined(CONFIG_NUMA) && defined(CONFIG_MEMORY_HOTPLUG)
  /*
@@@ -1329,6 -1337,12 +1330,6 @@@ void __init kmem_cache_init_late(void
        /* Done! */
        slab_state = FULL;
  
 -      /*
 -       * Register a cpu startup notifier callback that initializes
 -       * cpu_cache_get for all new cpus
 -       */
 -      register_cpu_notifier(&cpucache_notifier);
 -
  #ifdef CONFIG_NUMA
        /*
         * Register a memory hotplug callback that initializes and frees
  
  static int __init cpucache_init(void)
  {
 -      int cpu;
 +      int ret;
  
        /*
         * Register the timers that return unneeded pages to the page allocator
         */
 -      for_each_online_cpu(cpu)
 -              start_cpu_timer(cpu);
 +      ret = cpuhp_setup_state(CPUHP_AP_ONLINE_DYN, "SLAB online",
 +                              slab_online_cpu, slab_offline_cpu);
 +      WARN_ON(ret < 0);
  
        /* Done! */
        slab_state = FULL;
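cpucache_init() now registers an online/offline callback pair with the CPU hotplug state machine instead of starting timers for each online CPU by hand, while slab_prepare_cpu() and slab_dead_cpu() are left for the core hotplug code to invoke at the prepare/dead stages. A minimal sketch of how such a dynamic registration is typically written (illustrative, not the exact kernel wiring for slab):

    /* illustrative sketch of a dynamic cpuhp registration */
    static int example_online(unsigned int cpu)
    {
            /* start per-cpu work for this CPU */
            return 0;
    }

    static int example_offline(unsigned int cpu)
    {
            /* stop per-cpu work for this CPU */
            return 0;
    }

    static int __init example_init(void)
    {
            int ret;

            ret = cpuhp_setup_state(CPUHP_AP_ONLINE_DYN, "example:online",
                                    example_online, example_offline);
            return ret < 0 ? ret : 0;   /* dynamic states return a positive id on success */
    }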
@@@ -1382,24 -1395,27 +1383,27 @@@ slab_out_of_memory(struct kmem_cache *c
        for_each_kmem_cache_node(cachep, node, n) {
                unsigned long active_objs = 0, num_objs = 0, free_objects = 0;
                unsigned long active_slabs = 0, num_slabs = 0;
+               unsigned long num_slabs_partial = 0, num_slabs_free = 0;
+               unsigned long num_slabs_full;
  
                spin_lock_irqsave(&n->list_lock, flags);
-               list_for_each_entry(page, &n->slabs_full, lru) {
-                       active_objs += cachep->num;
-                       active_slabs++;
-               }
+               num_slabs = n->num_slabs;
                list_for_each_entry(page, &n->slabs_partial, lru) {
                        active_objs += page->active;
-                       active_slabs++;
+                       num_slabs_partial++;
                }
                list_for_each_entry(page, &n->slabs_free, lru)
-                       num_slabs++;
+                       num_slabs_free++;
  
                free_objects += n->free_objects;
                spin_unlock_irqrestore(&n->list_lock, flags);
  
-               num_slabs += active_slabs;
                num_objs = num_slabs * cachep->num;
+               active_slabs = num_slabs - num_slabs_free;
+               num_slabs_full = num_slabs -
+                       (num_slabs_partial + num_slabs_free);
+               active_objs += (num_slabs_full * cachep->num);
                pr_warn("  node %d: slabs: %ld/%ld, objs: %ld/%ld, free: %ld\n",
                        node, active_slabs, num_slabs, active_objs, num_objs,
                        free_objects);
@@@ -2314,6 -2330,7 +2318,7 @@@ static int drain_freelist(struct kmem_c
  
                page = list_entry(p, struct page, lru);
                list_del(&page->lru);
+               n->num_slabs--;
                /*
                 * Safe to drop the lock. The slab is no longer linked
                 * to the cache.
@@@ -2752,6 -2769,8 +2757,8 @@@ static void cache_grow_end(struct kmem_
                list_add_tail(&page->lru, &(n->slabs_free));
        else
                fixup_slab_list(cachep, n, page, &list);
+       n->num_slabs++;
        STATS_INC_GROWN(cachep);
        n->free_objects += cachep->num - page->active;
        spin_unlock(&n->list_lock);
@@@ -3443,6 -3462,7 +3450,7 @@@ static void free_block(struct kmem_cach
  
                page = list_last_entry(&n->slabs_free, struct page, lru);
                list_move(&page->lru, list);
+               n->num_slabs--;
        }
  }
  
@@@ -4099,6 -4119,8 +4107,8 @@@ void get_slabinfo(struct kmem_cache *ca
        unsigned long num_objs;
        unsigned long active_slabs = 0;
        unsigned long num_slabs, free_objects = 0, shared_avail = 0;
+       unsigned long num_slabs_partial = 0, num_slabs_free = 0;
+       unsigned long num_slabs_full = 0;
        const char *name;
        char *error = NULL;
        int node;
                check_irq_on();
                spin_lock_irq(&n->list_lock);
  
-               list_for_each_entry(page, &n->slabs_full, lru) {
-                       if (page->active != cachep->num && !error)
-                               error = "slabs_full accounting error";
-                       active_objs += cachep->num;
-                       active_slabs++;
-               }
+               num_slabs += n->num_slabs;
                list_for_each_entry(page, &n->slabs_partial, lru) {
                        if (page->active == cachep->num && !error)
                                error = "slabs_partial accounting error";
                        if (!page->active && !error)
                                error = "slabs_partial accounting error";
                        active_objs += page->active;
-                       active_slabs++;
+                       num_slabs_partial++;
                }
                list_for_each_entry(page, &n->slabs_free, lru) {
                        if (page->active && !error)
                                error = "slabs_free accounting error";
-                       num_slabs++;
+                       num_slabs_free++;
                }
                free_objects += n->free_objects;
                if (n->shared)
                        shared_avail += n->shared->avail;
  
                spin_unlock_irq(&n->list_lock);
        }
-       num_slabs += active_slabs;
        num_objs = num_slabs * cachep->num;
+       active_slabs = num_slabs - num_slabs_free;
+       num_slabs_full = num_slabs - (num_slabs_partial + num_slabs_free);
+       active_objs += (num_slabs_full * cachep->num);
        if (num_objs - active_objs != free_objects && !error)
                error = "free_objects accounting error";
  